qlge: bugfix: Add missing dev_kfree_skb_any() call.
drivers/net/qlge/qlge_main.c
/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |    */
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
    NETIF_MSG_TX_QUEUED |
    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;  /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, 0);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
        u32 sem_bits = 0;

        switch (sem_mask) {
        case SEM_XGMAC0_MASK:
                sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
                break;
        case SEM_XGMAC1_MASK:
                sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
                break;
        case SEM_ICB_MASK:
                sem_bits = SEM_SET << SEM_ICB_SHIFT;
                break;
        case SEM_MAC_ADDR_MASK:
                sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
                break;
        case SEM_FLASH_MASK:
                sem_bits = SEM_SET << SEM_FLASH_SHIFT;
                break;
        case SEM_PROBE_MASK:
                sem_bits = SEM_SET << SEM_PROBE_SHIFT;
                break;
        case SEM_RT_IDX_MASK:
                sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
                break;
        case SEM_PROC_REG_MASK:
                sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
                break;
        default:
                QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
                return -EINVAL;
        }

        ql_write32(qdev, SEM, sem_bits | sem_mask);
        return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
        unsigned int wait_count = 30;
        do {
                if (!ql_sem_trylock(qdev, sem_mask))
                        return 0;
                udelay(100);
        } while (--wait_count);
        return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
        ql_write32(qdev, SEM, sem_mask);
        ql_read32(qdev, SEM);   /* flush */
}
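
/* Typical usage of the semaphore pair, as ql_get_flash_params()
 * below does for the flash resource:
 *
 *      if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *              return -ETIMEDOUT;
 *      ... access the shared resource ...
 *      ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */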

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
        u32 temp;
        int count = UDELAY_COUNT;

        while (count) {
                temp = ql_read32(qdev, reg);

                /* check for errors */
                if (temp & err_bit) {
                        QPRINTK(qdev, PROBE, ALERT,
                                "register 0x%.08x access error, value = 0x%.08x!\n",
                                reg, temp);
                        return -EIO;
                } else if (temp & bit)
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
        QPRINTK(qdev, PROBE, ALERT,
                "Timed out waiting for reg %x to come ready.\n", reg);
        return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
        int count = UDELAY_COUNT;
        u32 temp;

        while (count) {
                temp = ql_read32(qdev, CFG);
                if (temp & CFG_LE)
                        return -EIO;
                if (!(temp & bit))
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
        return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
                 u16 q_id)
{
        u64 map;
        int status = 0;
        int direction;
        u32 mask;
        u32 value;

        direction =
            (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
            PCI_DMA_FROMDEVICE;

        map = pci_map_single(qdev->pdev, ptr, size, direction);
        if (pci_dma_mapping_error(qdev->pdev, map)) {
                QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
                return -ENOMEM;
        }

        status = ql_wait_cfg(qdev, bit);
        if (status) {
                QPRINTK(qdev, IFUP, ERR,
                        "Timed out waiting for CFG to come ready.\n");
                goto exit;
        }

        status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
        if (status)
                goto exit;
        ql_write32(qdev, ICB_L, (u32) map);
        ql_write32(qdev, ICB_H, (u32) (map >> 32));
        ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */

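        /* Assumed hardware convention (also visible in the SEM and
         * INTR_EN writes in this file): the upper 16 bits written to
         * CFG act as a mask selecting which lower bits take effect,
         * so only the fields set here are changed.
         */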
        mask = CFG_Q_MASK | (bit << 16);
        value = bit | (q_id << CFG_Q_SHIFT);
        ql_write32(qdev, CFG, (mask | value));

        /*
         * Wait for the bit to clear after signaling hw.
         */
        status = ql_wait_cfg(qdev, bit);
exit:
        pci_unmap_single(qdev->pdev, map, size, direction);
        return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
                        u32 *value)
{
        u32 offset = 0;
        int status;

        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return status;
        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
        case MAC_ADDR_TYPE_CAM_MAC:
                {
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
                                status =
                                    ql_wait_reg_rdy(qdev,
                                        MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                                if (status)
                                        goto exit;
                                ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                           (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                           MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                                status =
                                    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
                                                    MAC_ADDR_MR, 0);
                                if (status)
                                        goto exit;
                                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        }
                        break;
                }
        case MAC_ADDR_TYPE_VLAN:
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
                QPRINTK(qdev, IFUP, CRIT,
                        "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
exit:
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
                               u16 index)
{
        u32 offset = 0;
        int status = 0;

        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return status;
        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
        case MAC_ADDR_TYPE_CAM_MAC:
                {
                        u32 cam_output;
                        u32 upper = (addr[0] << 8) | addr[1];
                        u32 lower =
                            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
                            (addr[5]);

                        QPRINTK(qdev, IFUP, INFO,
                                "Adding %s address %pM"
                                " at index %d in the CAM.\n",
                                ((type ==
                                  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
                                 "UNICAST"), addr, index);

                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   type);       /* type */
                        ql_write32(qdev, MAC_ADDR_DATA, lower);
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   type);       /* type */
                        ql_write32(qdev, MAC_ADDR_DATA, upper);
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                                   type);       /* type */
                        /* This field should also include the queue id
                           and possibly the function id.  Right now we hardcode
                           the route field to NIC core.
                         */
                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
                                cam_output = (CAM_OUT_ROUTE_NIC |
                                              (qdev->
                                               func << CAM_OUT_FUNC_SHIFT) |
                                              (qdev->
                                               rss_ring_first_cq_id <<
                                               CAM_OUT_CQ_ID_SHIFT));
                                if (qdev->vlgrp)
                                        cam_output |= CAM_OUT_RV;
                                /* route to NIC core */
                                ql_write32(qdev, MAC_ADDR_DATA, cam_output);
                        }
                        break;
                }
        case MAC_ADDR_TYPE_VLAN:
                {
                        u32 enable_bit = *((u32 *) &addr[0]);
                        /* For VLAN, the addr actually holds a bit that
                         * either enables or disables the vlan id we are
                         * addressing. It's either MAC_ADDR_E on or off.
                         * That's bit-27 we're talking about.
                         */
                        QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
                                (enable_bit ? "Adding" : "Removing"),
                                index, (enable_bit ? "to" : "from"));

                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                                   type |       /* type */
                                   enable_bit); /* enable/disable */
                        break;
                }
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
                QPRINTK(qdev, IFUP, CRIT,
                        "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
exit:
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        return status;
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
        int status = 0;

        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
        if (status)
                goto exit;

        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
        if (status)
                goto exit;

        ql_write32(qdev, RT_IDX,
                   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
        if (status)
                goto exit;
        *value = ql_read32(qdev, RT_DATA);
exit:
        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
        return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
                              int enable)
{
        int status;
        u32 value = 0;

        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
        if (status)
                return status;

        QPRINTK(qdev, IFUP, DEBUG,
                "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
                (enable ? "Adding" : "Removing"),
                ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
                ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
                ((index ==
                  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
                ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
                ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
                ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
                ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
                ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
                ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
                ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
                ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
                ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
                ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
                ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
                ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
                ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
                (enable ? "to" : "from"));

        switch (mask) {
        case RT_IDX_CAM_HIT:
                {
                        value = RT_IDX_DST_CAM_Q |      /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_VALID:      /* Promiscuous Mode frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
                {
                        value = RT_IDX_DST_CAM_Q |      /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
                {
                        value = RT_IDX_DST_CAM_Q |      /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
                {
                        value = RT_IDX_DST_RSS |        /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case 0:         /* Clear the E-bit on an entry. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (index << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        default:
                QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
                        mask);
                status = -EPERM;
                goto exit;
        }

        if (value) {
                status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
                if (status)
                        goto exit;
                value |= (enable ? RT_IDX_E : 0);
                ql_write32(qdev, RT_IDX, value);
                ql_write32(qdev, RT_DATA, enable ? mask : 0);
        }
exit:
        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
        return status;
}
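
/* Illustrative call (a sketch; the real call sites are outside
 * this excerpt): route broadcast frames to the default queue by
 * enabling the broadcast slot:
 *
 *      status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *                                  RT_IDX_BCAST, 1);
 */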

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
        u32 var = 0;
        unsigned long hw_flags = 0;
        struct intr_context *ctx = qdev->intr_context + intr;

        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
                /* Always enable if we're MSIX multi interrupts and
                 * it's not the default (zeroeth) interrupt.
                 */
                ql_write32(qdev, INTR_EN,
                           ctx->intr_en_mask);
                var = ql_read32(qdev, STS);
                return var;
        }

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        if (atomic_dec_and_test(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN,
                           ctx->intr_en_mask);
                var = ql_read32(qdev, STS);
        }
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
        u32 var = 0;
        unsigned long hw_flags;
        struct intr_context *ctx;

        /* HW disables for us if we're MSIX multi interrupts and
         * it's not the default (zeroeth) interrupt.
         */
        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
                return 0;

        ctx = qdev->intr_context + intr;
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        if (!atomic_read(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN,
                           ctx->intr_dis_mask);
                var = ql_read32(qdev, STS);
        }
        atomic_inc(&ctx->irq_cnt);
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return var;
}
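
/* Assumed calling pattern (the ISR and worker code are outside this
 * excerpt): the interrupt handler calls
 * ql_disable_completion_interrupt() before queueing work, and the
 * worker re-enables via ql_enable_completion_interrupt() when it
 * finishes, so irq_cnt stays balanced as the comment above
 * describes.
 */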

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
        int i;
        for (i = 0; i < qdev->intr_count; i++) {
                /* The enable call does a atomic_dec_and_test
                 * and enables only if the result is zero.
                 * So we precharge it here.
                 */
                if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
                        i == 0))
                        atomic_set(&qdev->intr_context[i].irq_cnt, 1);
                ql_enable_completion_interrupt(qdev, i);
        }

}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
        int status = 0;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
        /* set up for reg read */
        ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
        /* This data is stored on flash as an array of
         * __le32.  Since ql_read32() returns cpu endian
         * we need to swap it back.
         */
        *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
        return status;
}

static int ql_get_flash_params(struct ql_adapter *qdev)
{
        int i;
        int status;
        __le32 *p = (__le32 *)&qdev->flash;
        u32 offset = 0;

        /* Second function's parameters follow the first
         * function's.
         */
        if (qdev->func)
                offset = sizeof(qdev->flash) / sizeof(u32);

        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
                return -ETIMEDOUT;

        for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                if (status) {
                        QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
                        goto exit;
                }

        }
exit:
        ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
        int status;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                return status;
        /* write the data to the data reg */
        ql_write32(qdev, XGMAC_DATA, data);
        /* trigger the write */
        ql_write32(qdev, XGMAC_ADDR, reg);
        return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
        int status = 0;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                goto exit;
        /* set up for reg read */
        ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                goto exit;
        /* get the data */
        *data = ql_read32(qdev, XGMAC_DATA);
exit:
        return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
        int status = 0;
        u32 hi = 0;
        u32 lo = 0;

        status = ql_read_xgmac_reg(qdev, reg, &lo);
        if (status)
                goto exit;

        status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
        if (status)
                goto exit;

        *data = (u64) lo | ((u64) hi << 32);

exit:
        return status;
}
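
/* Illustrative read (XGMAC_STAT_REG is a hypothetical placeholder,
 * not a real register name from this driver):
 *
 *      u64 stat;
 *      if (ql_read_xgmac_reg64(qdev, XGMAC_STAT_REG, &stat))
 *              ... handle the failed register read ...
 */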

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_port_initialize(struct ql_adapter *qdev)
{
        int status = 0;
        u32 data;

        if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
                /* Another function has the semaphore, so
                 * wait for the port init bit to come ready.
                 */
                QPRINTK(qdev, LINK, INFO,
                        "Another function has the semaphore, so wait for the port init bit to come ready.\n");
                status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
                if (status) {
                        QPRINTK(qdev, LINK, CRIT,
                                "Port initialize timed out.\n");
                }
                return status;
        }

        QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
        /* Set the core reset. */
        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
        if (status)
                goto end;
        data |= GLOBAL_CFG_RESET;
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        if (status)
                goto end;

        /* Clear the core reset and turn on jumbo for receiver. */
        data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
        data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
        data |= GLOBAL_CFG_TX_STAT_EN;
        data |= GLOBAL_CFG_RX_STAT_EN;
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        if (status)
                goto end;

        /* Enable transmitter, and clear its reset. */
        status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
        if (status)
                goto end;
        data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
        data |= TX_CFG_EN;      /* Enable the transmitter. */
        status = ql_write_xgmac_reg(qdev, TX_CFG, data);
        if (status)
                goto end;

        /* Enable receiver and clear its reset. */
        status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
        if (status)
                goto end;
        data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
        data |= RX_CFG_EN;      /* Enable the receiver. */
        status = ql_write_xgmac_reg(qdev, RX_CFG, data);
        if (status)
                goto end;

        /* Turn on jumbo. */
        status =
            ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
        if (status)
                goto end;
        status =
            ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
        if (status)
                goto end;

        /* Signal to the world that the port is enabled. */
        ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
        ql_sem_unlock(qdev, qdev->xg_sem_mask);
        return status;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
        rx_ring->lbq_curr_idx++;
        if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
                rx_ring->lbq_curr_idx = 0;
        rx_ring->lbq_free_cnt++;
        return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
        struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
        rx_ring->sbq_curr_idx++;
        if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
                rx_ring->sbq_curr_idx = 0;
        rx_ring->sbq_free_cnt++;
        return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
        rx_ring->cnsmr_idx++;
        rx_ring->curr_entry++;
        if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
                rx_ring->cnsmr_idx = 0;
                rx_ring->curr_entry = rx_ring->cq_base;
        }
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
        ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
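
/* Intended pairing (a note on usage; the completion-processing
 * loop is outside this excerpt): advance the ring once per handled
 * entry with ql_update_cq(), then publish the new consumer index
 * to the chip with ql_write_cq_idx().
 */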

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
        int clean_idx = rx_ring->lbq_clean_idx;
        struct bq_desc *lbq_desc;
        u64 map;
        int i;

        while (rx_ring->lbq_free_cnt > 16) {
                for (i = 0; i < 16; i++) {
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "lbq: try cleaning clean_idx = %d.\n",
                                clean_idx);
                        lbq_desc = &rx_ring->lbq[clean_idx];
                        if (lbq_desc->p.lbq_page == NULL) {
                                QPRINTK(qdev, RX_STATUS, DEBUG,
                                        "lbq: getting new page for index %d.\n",
                                        lbq_desc->index);
                                lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
                                if (lbq_desc->p.lbq_page == NULL) {
                                        QPRINTK(qdev, RX_STATUS, ERR,
                                                "Couldn't get a page.\n");
                                        return;
                                }
                                map = pci_map_page(qdev->pdev,
                                                   lbq_desc->p.lbq_page,
                                                   0, PAGE_SIZE,
                                                   PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
                                        put_page(lbq_desc->p.lbq_page);
                                        lbq_desc->p.lbq_page = NULL;
                                        QPRINTK(qdev, RX_STATUS, ERR,
                                                "PCI mapping failed.\n");
                                        return;
                                }
                                pci_unmap_addr_set(lbq_desc, mapaddr, map);
                                pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
                                *lbq_desc->addr = cpu_to_le64(map);
                        }
                        clean_idx++;
                        if (clean_idx == rx_ring->lbq_len)
                                clean_idx = 0;
                }

                rx_ring->lbq_clean_idx = clean_idx;
                rx_ring->lbq_prod_idx += 16;
                if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
                        rx_ring->lbq_prod_idx = 0;
                QPRINTK(qdev, RX_STATUS, DEBUG,
                        "lbq: updating prod idx = %d.\n",
                        rx_ring->lbq_prod_idx);
                ql_write_db_reg(rx_ring->lbq_prod_idx,
                                rx_ring->lbq_prod_idx_db_reg);
                rx_ring->lbq_free_cnt -= 16;
        }
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
        int clean_idx = rx_ring->sbq_clean_idx;
        struct bq_desc *sbq_desc;
        u64 map;
        int i;

        while (rx_ring->sbq_free_cnt > 16) {
                for (i = 0; i < 16; i++) {
                        sbq_desc = &rx_ring->sbq[clean_idx];
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "sbq: try cleaning clean_idx = %d.\n",
                                clean_idx);
                        if (sbq_desc->p.skb == NULL) {
                                QPRINTK(qdev, RX_STATUS, DEBUG,
                                        "sbq: getting new skb for index %d.\n",
                                        sbq_desc->index);
                                sbq_desc->p.skb =
                                    netdev_alloc_skb(qdev->ndev,
                                                     rx_ring->sbq_buf_size);
                                if (sbq_desc->p.skb == NULL) {
                                        QPRINTK(qdev, PROBE, ERR,
                                                "Couldn't get an skb.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        return;
                                }
                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
                                map = pci_map_single(qdev->pdev,
                                                     sbq_desc->p.skb->data,
                                                     rx_ring->sbq_buf_size /
                                                     2, PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
                                        QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
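                                        /* Free the just-allocated skb:
                                         * this is the dev_kfree_skb_any()
                                         * the patch title refers to;
                                         * without it the skb would leak
                                         * on every DMA mapping failure.
                                         */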
                                        dev_kfree_skb_any(sbq_desc->p.skb);
                                        sbq_desc->p.skb = NULL;
                                        return;
                                }
                                pci_unmap_addr_set(sbq_desc, mapaddr, map);
                                pci_unmap_len_set(sbq_desc, maplen,
                                                  rx_ring->sbq_buf_size / 2);
                                *sbq_desc->addr = cpu_to_le64(map);
                        }

                        clean_idx++;
                        if (clean_idx == rx_ring->sbq_len)
                                clean_idx = 0;
                }
                rx_ring->sbq_clean_idx = clean_idx;
                rx_ring->sbq_prod_idx += 16;
                if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
                        rx_ring->sbq_prod_idx = 0;
                QPRINTK(qdev, RX_STATUS, DEBUG,
                        "sbq: updating prod idx = %d.\n",
                        rx_ring->sbq_prod_idx);
                ql_write_db_reg(rx_ring->sbq_prod_idx,
                                rx_ring->sbq_prod_idx_db_reg);

                rx_ring->sbq_free_cnt -= 16;
        }
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
                                    struct rx_ring *rx_ring)
{
        ql_update_sbq(qdev, rx_ring);
        ql_update_lbq(qdev, rx_ring);
}
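
/* Both refill paths above deliberately work in batches of 16:
 * they replenish 16 buffers, advance (and wrap) the producer index
 * by 16, and only then ring the doorbell, amortizing the register
 * write across the whole batch.
 */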

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
                          struct tx_ring_desc *tx_ring_desc, int mapped)
{
        int i;
        for (i = 0; i < mapped; i++) {
                if (i == 0 || (i == 7 && mapped > 7)) {
                        /*
                         * Unmap the skb->data area, or the
                         * external sglist (AKA the Outbound
                         * Address List (OAL)).
                         * If it's the zeroeth element, then it's
                         * the skb->data area.  If it's the 7th
                         * element and there are more than 6 frags,
                         * then it's an OAL.
                         */
                        if (i == 7) {
                                QPRINTK(qdev, TX_DONE, DEBUG,
                                        "unmapping OAL area.\n");
                        }
                        pci_unmap_single(qdev->pdev,
                                         pci_unmap_addr(&tx_ring_desc->map[i],
                                                        mapaddr),
                                         pci_unmap_len(&tx_ring_desc->map[i],
                                                       maplen),
                                         PCI_DMA_TODEVICE);
                } else {
                        QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
                                i);
                        pci_unmap_page(qdev->pdev,
                                       pci_unmap_addr(&tx_ring_desc->map[i],
                                                      mapaddr),
                                       pci_unmap_len(&tx_ring_desc->map[i],
                                                     maplen), PCI_DMA_TODEVICE);
                }
        }

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
                       struct ob_mac_iocb_req *mac_iocb_ptr,
                       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
        int len = skb_headlen(skb);
        dma_addr_t map;
        int frag_idx, err, map_idx = 0;
        struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
        int frag_cnt = skb_shinfo(skb)->nr_frags;

        if (frag_cnt) {
                QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
        }
        /*
         * Map the skb buffer first.
         */
        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

        err = pci_dma_mapping_error(qdev->pdev, map);
        if (err) {
                QPRINTK(qdev, TX_QUEUED, ERR,
                        "PCI mapping failed with error: %d\n", err);

                return NETDEV_TX_BUSY;
        }

        tbd->len = cpu_to_le32(len);
        tbd->addr = cpu_to_le64(map);
        pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
        pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
        map_idx++;

        /*
         * This loop fills the remainder of the 8 address descriptors
         * in the IOCB.  If there are more than 7 fragments, then the
         * eighth address desc will point to an external list (OAL).
         * When this happens, the remainder of the frags will be stored
         * in this list.
         */
        for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
                tbd++;
                if (frag_idx == 6 && frag_cnt > 7) {
                        /* Let's tack on an sglist.
                         * Our control block will now
                         * look like this:
                         * iocb->seg[0] = skb->data
                         * iocb->seg[1] = frag[0]
                         * iocb->seg[2] = frag[1]
                         * iocb->seg[3] = frag[2]
                         * iocb->seg[4] = frag[3]
                         * iocb->seg[5] = frag[4]
                         * iocb->seg[6] = frag[5]
                         * iocb->seg[7] = ptr to OAL (external sglist)
                         * oal->seg[0] = frag[6]
                         * oal->seg[1] = frag[7]
                         * oal->seg[2] = frag[8]
                         * oal->seg[3] = frag[9]
                         * oal->seg[4] = frag[10]
                         *      etc...
                         */
                        /* Tack on the OAL in the eighth segment of IOCB. */
                        map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
                                             sizeof(struct oal),
                                             PCI_DMA_TODEVICE);
                        err = pci_dma_mapping_error(qdev->pdev, map);
                        if (err) {
                                QPRINTK(qdev, TX_QUEUED, ERR,
                                        "PCI mapping outbound address list with error: %d\n",
                                        err);
                                goto map_error;
                        }

                        tbd->addr = cpu_to_le64(map);
                        /*
                         * The length is the number of fragments
                         * that remain to be mapped times the length
                         * of our sglist (OAL).
                         */
                        tbd->len =
                            cpu_to_le32((sizeof(struct tx_buf_desc) *
                                         (frag_cnt - frag_idx)) | TX_DESC_C);
                        pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
                                           map);
                        pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
                                          sizeof(struct oal));
                        tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
                        map_idx++;
                }

                map =
                    pci_map_page(qdev->pdev, frag->page,
                                 frag->page_offset, frag->size,
                                 PCI_DMA_TODEVICE);

                err = pci_dma_mapping_error(qdev->pdev, map);
                if (err) {
                        QPRINTK(qdev, TX_QUEUED, ERR,
                                "PCI mapping frags failed with error: %d.\n",
                                err);
                        goto map_error;
                }

                tbd->addr = cpu_to_le64(map);
                tbd->len = cpu_to_le32(frag->size);
                pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
                pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
                                  frag->size);

        }
        /* Save the number of segments we've mapped. */
        tx_ring_desc->map_cnt = map_idx;
        /* Terminate the last segment. */
        tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
        return NETDEV_TX_OK;

map_error:
        /*
         * map_idx counts the segments that mapped successfully,
         * starting with the skb->data area at index zero, so
         * ql_unmap_send() unmaps exactly those segments.
         */
        ql_unmap_send(qdev, tx_ring_desc, map_idx);
        return NETDEV_TX_BUSY;
}
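
/* Returning NETDEV_TX_BUSY tells the stack to requeue and retry
 * the skb later; NETDEV_TX_OK means every segment (skb->data,
 * frags, and the OAL when needed) was mapped and recorded in
 * tx_ring_desc->map[].
 */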

static void ql_realign_skb(struct sk_buff *skb, int len)
{
        void *temp_addr = skb->data;

        /* Undo the skb_reserve(skb,32) we did before
         * giving to hardware, and realign data on
         * a 2-byte boundary.
         */
        skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
        skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
        skb_copy_to_linear_data(skb, temp_addr,
                (unsigned int)len);
}
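
/* NET_IP_ALIGN is normally 2, so placing the 14-byte Ethernet
 * header at a 2-byte offset leaves the IP header aligned on a
 * 4-byte boundary in the realigned skb.
 */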
1192
1193 /*
1194  * This function builds an skb for the given inbound
1195  * completion.  It will be rewritten for readability in the near
1196  * future, but for not it works well.
1197  */
1198 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1199                                        struct rx_ring *rx_ring,
1200                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1201 {
1202         struct bq_desc *lbq_desc;
1203         struct bq_desc *sbq_desc;
1204         struct sk_buff *skb = NULL;
1205         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1206        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1207
1208         /*
1209          * Handle the header buffer if present.
1210          */
1211         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1212             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1213                 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1214                 /*
1215                  * Headers fit nicely into a small buffer.
1216                  */
1217                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1218                 pci_unmap_single(qdev->pdev,
1219                                 pci_unmap_addr(sbq_desc, mapaddr),
1220                                 pci_unmap_len(sbq_desc, maplen),
1221                                 PCI_DMA_FROMDEVICE);
1222                 skb = sbq_desc->p.skb;
1223                 ql_realign_skb(skb, hdr_len);
1224                 skb_put(skb, hdr_len);
1225                 sbq_desc->p.skb = NULL;
1226         }
1227
1228         /*
1229          * Handle the data buffer(s).
1230          */
1231         if (unlikely(!length)) {        /* Is there data too? */
1232                 QPRINTK(qdev, RX_STATUS, DEBUG,
1233                         "No Data buffer in this packet.\n");
1234                 return skb;
1235         }
1236
1237         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1238                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1239                         QPRINTK(qdev, RX_STATUS, DEBUG,
1240                                 "Headers in small, data of %d bytes in small, combine them.\n", length);
1241                         /*
1242                          * Data is less than small buffer size so it's
1243                          * stuffed in a small buffer.
1244                          * For this case we append the data
1245                          * from the "data" small buffer to the "header" small
1246                          * buffer.
1247                          */
1248                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1249                         pci_dma_sync_single_for_cpu(qdev->pdev,
1250                                                     pci_unmap_addr
1251                                                     (sbq_desc, mapaddr),
1252                                                     pci_unmap_len
1253                                                     (sbq_desc, maplen),
1254                                                     PCI_DMA_FROMDEVICE);
1255                         memcpy(skb_put(skb, length),
1256                                sbq_desc->p.skb->data, length);
1257                         pci_dma_sync_single_for_device(qdev->pdev,
1258                                                        pci_unmap_addr
1259                                                        (sbq_desc,
1260                                                         mapaddr),
1261                                                        pci_unmap_len
1262                                                        (sbq_desc,
1263                                                         maplen),
1264                                                        PCI_DMA_FROMDEVICE);
1265                 } else {
1266                         QPRINTK(qdev, RX_STATUS, DEBUG,
1267                                 "%d bytes in a single small buffer.\n", length);
1268                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1269                         skb = sbq_desc->p.skb;
1270                         ql_realign_skb(skb, length);
1271                         skb_put(skb, length);
1272                         pci_unmap_single(qdev->pdev,
1273                                          pci_unmap_addr(sbq_desc,
1274                                                         mapaddr),
1275                                          pci_unmap_len(sbq_desc,
1276                                                        maplen),
1277                                          PCI_DMA_FROMDEVICE);
1278                         sbq_desc->p.skb = NULL;
1279                 }
1280         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1281                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1282                         QPRINTK(qdev, RX_STATUS, DEBUG,
1283                                 "Header in small, %d bytes in large. Chain large to small!\n", length);
1284                         /*
1285                          * The data is in a single large buffer.  We
1286                          * chain it to the header buffer's skb and let
1287                          * it rip.
1288                          */
1289                         lbq_desc = ql_get_curr_lbuf(rx_ring);
1290                         pci_unmap_page(qdev->pdev,
1291                                        pci_unmap_addr(lbq_desc,
1292                                                       mapaddr),
1293                                        pci_unmap_len(lbq_desc, maplen),
1294                                        PCI_DMA_FROMDEVICE);
1295                         QPRINTK(qdev, RX_STATUS, DEBUG,
1296                                 "Chaining page to skb.\n");
1297                         skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1298                                            0, length);
1299                         skb->len += length;
1300                         skb->data_len += length;
1301                         skb->truesize += length;
1302                         lbq_desc->p.lbq_page = NULL;
1303                 } else {
1304                         /*
1305                          * The headers and data are in a single large buffer. We
1306                          * copy it to a new skb and let it go. This can happen with
1307                          * jumbo mtu on a non-TCP/UDP frame.
1308                          */
1309                         lbq_desc = ql_get_curr_lbuf(rx_ring);
1310                         skb = netdev_alloc_skb(qdev->ndev, length);
1311                         if (skb == NULL) {
1312                                 QPRINTK(qdev, PROBE, DEBUG,
1313                                         "No skb available, drop the packet.\n");
1314                                 return NULL;
1315                         }
1316                         pci_unmap_page(qdev->pdev,
1317                                        pci_unmap_addr(lbq_desc,
1318                                                       mapaddr),
1319                                        pci_unmap_len(lbq_desc, maplen),
1320                                        PCI_DMA_FROMDEVICE);
1321                         skb_reserve(skb, NET_IP_ALIGN);
1322                         QPRINTK(qdev, RX_STATUS, DEBUG,
1323                                 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1324                         skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1325                                            0, length);
1326                         skb->len += length;
1327                         skb->data_len += length;
1328                         skb->truesize += length;
1329                         length = 0;     /* all of the data landed in this one page */
1330                         lbq_desc->p.lbq_page = NULL;
1331                         __pskb_pull_tail(skb,
1332                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1333                                 VLAN_ETH_HLEN : ETH_HLEN);
1334                 }
1335         } else {
1336                 /*
1337                  * The data is in a chain of large buffers
1338                  * pointed to by a small buffer.  We loop
1339                  * through and chain them to our small header
1340                  * buffer's skb.
1341                  * frags:  There are 18 frags max and our small
1342                  *         buffer will hold 32 of them.  We'll use
1343                  *         at most 3 for our 9000 byte jumbo
1344                  *         frames.  If the MTU goes up we could
1345                  *         eventually be in trouble.
1346                  */
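                /*
                 * Illustrative sketch of what the small buffer holds in
                 * this case (as described above): an array of little-
                 * endian 64-bit bus addresses, one per chained page,
                 * e.g.  skb->data: [__le64 page0][__le64 page1]...
                 */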
1347                 int size, offset, i = 0;
1348                 __le64 *bq, bq_array[8];
1349                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1350                 pci_unmap_single(qdev->pdev,
1351                                  pci_unmap_addr(sbq_desc, mapaddr),
1352                                  pci_unmap_len(sbq_desc, maplen),
1353                                  PCI_DMA_FROMDEVICE);
1354                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1355                         /*
1356                          * This is a non-TCP/UDP IP frame, so
1357                          * the headers aren't split into a small
1358                          * buffer.  We have to use the small buffer
1359                          * that contains our sg list as our skb to
1360                          * send upstream.  Copy the sg list here to
1361                          * a local buffer and use it to find the
1362                          * pages to chain.
1363                          */
1364                         QPRINTK(qdev, RX_STATUS, DEBUG,
1365                                 "%d bytes of headers & data in chain of large.\n", length);
1366                         skb = sbq_desc->p.skb;
1367                         bq = &bq_array[0];
1368                         memcpy(bq, skb->data, sizeof(bq_array));
1369                         sbq_desc->p.skb = NULL;
1370                         skb_reserve(skb, NET_IP_ALIGN);
1371                 } else {
1372                         QPRINTK(qdev, RX_STATUS, DEBUG,
1373                                 "Headers in small, %d bytes of data in chain of large.\n", length);
1374                         bq = (__le64 *)sbq_desc->p.skb->data;
1375                 }
1376                 while (length > 0) {
1377                         lbq_desc = ql_get_curr_lbuf(rx_ring);
1378                         pci_unmap_page(qdev->pdev,
1379                                        pci_unmap_addr(lbq_desc,
1380                                                       mapaddr),
1381                                        pci_unmap_len(lbq_desc,
1382                                                      maplen),
1383                                        PCI_DMA_FROMDEVICE);
1384                         size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1385                         offset = 0;
1386
1387                         QPRINTK(qdev, RX_STATUS, DEBUG,
1388                                 "Adding page %d to skb for %d bytes.\n",
1389                                 i, size);
1390                         skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1391                                            offset, size);
1392                         skb->len += size;
1393                         skb->data_len += size;
1394                         skb->truesize += size;
1395                         length -= size;
1396                         lbq_desc->p.lbq_page = NULL;
1397                         bq++;
1398                         i++;
1399                 }
1400                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1401                                 VLAN_ETH_HLEN : ETH_HLEN);
1402         }
1403         return skb;
1404 }
1405
1406 /* Process an inbound completion from an rx ring. */
1407 static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1408                                    struct rx_ring *rx_ring,
1409                                    struct ib_mac_iocb_rsp *ib_mac_rsp)
1410 {
1411         struct net_device *ndev = qdev->ndev;
1412         struct sk_buff *skb = NULL;
1413
1414         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1415
1416         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1417         if (unlikely(!skb)) {
1418                 QPRINTK(qdev, RX_STATUS, DEBUG,
1419                         "No skb available, drop packet.\n");
1420                 return;
1421         }
1422
1423         prefetch(skb->data);
1424         skb->dev = ndev;
1425         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1426                 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1427                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1428                         IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1429                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1430                         IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1431                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1432                         IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1433         }
1434         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1435                 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1436         }
1437         if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
1438                 QPRINTK(qdev, RX_STATUS, ERR,
1439                         "Bad checksum for this %s packet.\n",
1440                         ((ib_mac_rsp->
1441                           flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
1442                 skb->ip_summed = CHECKSUM_NONE;
1443         } else if (qdev->rx_csum &&
1444                    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
1445                     ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1446                      !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
1447                 QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
1448                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1449         }
1450         qdev->stats.rx_packets++;
1451         qdev->stats.rx_bytes += skb->len;
1452         skb->protocol = eth_type_trans(skb, ndev);
1453         if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
1454                 QPRINTK(qdev, RX_STATUS, DEBUG,
1455                         "Passing a VLAN packet upstream.\n");
1456                 vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
1457                                 le16_to_cpu(ib_mac_rsp->vlan_id));
1458         } else {
1459                 QPRINTK(qdev, RX_STATUS, DEBUG,
1460                         "Passing a normal packet upstream.\n");
1461                 netif_receive_skb(skb);
1462         }
1463 }
1464
1465 /* Process an outbound completion from an rx ring. */
1466 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1467                                    struct ob_mac_iocb_rsp *mac_rsp)
1468 {
1469         struct tx_ring *tx_ring;
1470         struct tx_ring_desc *tx_ring_desc;
1471
1472         QL_DUMP_OB_MAC_RSP(mac_rsp);
1473         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1474         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1475         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1476         qdev->stats.tx_bytes += tx_ring_desc->skb->len;
1477         qdev->stats.tx_packets++;
1478         dev_kfree_skb(tx_ring_desc->skb);
1479         tx_ring_desc->skb = NULL;
1480
1481         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1482                                         OB_MAC_IOCB_RSP_S |
1483                                         OB_MAC_IOCB_RSP_L |
1484                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1485                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1486                         QPRINTK(qdev, TX_DONE, WARNING,
1487                                 "Total descriptor length did not match transfer length.\n");
1488                 }
1489                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1490                         QPRINTK(qdev, TX_DONE, WARNING,
1491                                 "Frame too short to be legal, not sent.\n");
1492                 }
1493                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1494                         QPRINTK(qdev, TX_DONE, WARNING,
1495                                 "Frame too long, but sent anyway.\n");
1496                 }
1497                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1498                         QPRINTK(qdev, TX_DONE, WARNING,
1499                                 "PCI backplane error. Frame not sent.\n");
1500                 }
1501         }
1502         atomic_inc(&tx_ring->tx_count);
1503 }
1504
1505 /* Fire up a handler to reset the MPI processor. */
1506 void ql_queue_fw_error(struct ql_adapter *qdev)
1507 {
1508         netif_stop_queue(qdev->ndev);
1509         netif_carrier_off(qdev->ndev);
1510         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1511 }
1512
1513 void ql_queue_asic_error(struct ql_adapter *qdev)
1514 {
1515         netif_stop_queue(qdev->ndev);
1516         netif_carrier_off(qdev->ndev);
1517         ql_disable_interrupts(qdev);
1518         /* Clear adapter up bit to signal the recovery
1519          * process that it shouldn't kill the reset worker
1520          * thread
1521          */
1522         clear_bit(QL_ADAPTER_UP, &qdev->flags);
1523         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1524 }
1525
1526 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1527                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
1528 {
1529         switch (ib_ae_rsp->event) {
1530         case MGMT_ERR_EVENT:
1531                 QPRINTK(qdev, RX_ERR, ERR,
1532                         "Management Processor Fatal Error.\n");
1533                 ql_queue_fw_error(qdev);
1534                 return;
1535
1536         case CAM_LOOKUP_ERR_EVENT:
1537                 QPRINTK(qdev, LINK, ERR,
1538                         "Multiple CAM hits lookup occurred.\n");
1539                 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1540                 ql_queue_asic_error(qdev);
1541                 return;
1542
1543         case SOFT_ECC_ERROR_EVENT:
1544                 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1545                 ql_queue_asic_error(qdev);
1546                 break;
1547
1548         case PCI_ERR_ANON_BUF_RD:
1549                 QPRINTK(qdev, RX_ERR, ERR,
1550                         "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1551                         ib_ae_rsp->q_id);
1552                 ql_queue_asic_error(qdev);
1553                 break;
1554
1555         default:
1556                 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1557                         ib_ae_rsp->event);
1558                 ql_queue_asic_error(qdev);
1559                 break;
1560         }
1561 }
1562
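/* Completion queue handshake, in outline (an illustrative sketch of the
 * loop below, not a separate API):
 *
 *      prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 *      while (prod != rx_ring->cnsmr_idx) {       // chip is ahead of us
 *              ...process rx_ring->curr_entry...
 *              ql_update_cq(rx_ring);             // advance consumer index
 *              prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 *      }
 *      ql_write_cq_idx(rx_ring);  // tell the chip how far we consumed
 *
 * The chip DMAs its producer index into a shadow register in host
 * memory, so the loop polls plain memory rather than MMIO.
 */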
1563 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1564 {
1565         struct ql_adapter *qdev = rx_ring->qdev;
1566         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1567         struct ob_mac_iocb_rsp *net_rsp = NULL;
1568         int count = 0;
1569
1570         /* While there are entries in the completion queue. */
1571         while (prod != rx_ring->cnsmr_idx) {
1572
1573                 QPRINTK(qdev, RX_STATUS, DEBUG,
1574                         "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1575                         prod, rx_ring->cnsmr_idx);
1576
1577                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1578                 rmb();
1579                 switch (net_rsp->opcode) {
1580
1581                 case OPCODE_OB_MAC_TSO_IOCB:
1582                 case OPCODE_OB_MAC_IOCB:
1583                         ql_process_mac_tx_intr(qdev, net_rsp);
1584                         break;
1585                 default:
1586                         QPRINTK(qdev, RX_STATUS, DEBUG,
1587                                 "Hit default case, not handled! Dropping the packet, opcode = %x.\n",
1588                                 net_rsp->opcode);
1589                 }
1590                 count++;
1591                 ql_update_cq(rx_ring);
1592                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1593         }
1594         ql_write_cq_idx(rx_ring);
1595         if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
1596                 struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1597                 if (atomic_read(&tx_ring->queue_stopped) &&
1598                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1599                         /*
1600                          * The queue got stopped because the tx_ring was full.
1601                          * Wake it up, because it's now at least 25% empty.
1602                          */
1603                         netif_wake_queue(qdev->ndev);
1604         }
1605
1606         return count;
1607 }
1608
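/* Service up to "budget" inbound completions for this rx_ring,
 * replenish its buffer queues, and return how many were handled.
 * This is the NAPI-driven counterpart of the outbound cleaner above.
 */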
1609 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1610 {
1611         struct ql_adapter *qdev = rx_ring->qdev;
1612         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1613         struct ql_net_rsp_iocb *net_rsp;
1614         int count = 0;
1615
1616         /* While there are entries in the completion queue. */
1617         while (prod != rx_ring->cnsmr_idx) {
1618
1619                 QPRINTK(qdev, RX_STATUS, DEBUG,
1620                         "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1621                         prod, rx_ring->cnsmr_idx);
1622
1623                 net_rsp = rx_ring->curr_entry;
1624                 rmb();
1625                 switch (net_rsp->opcode) {
1626                 case OPCODE_IB_MAC_IOCB:
1627                         ql_process_mac_rx_intr(qdev, rx_ring,
1628                                                (struct ib_mac_iocb_rsp *)
1629                                                net_rsp);
1630                         break;
1631
1632                 case OPCODE_IB_AE_IOCB:
1633                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1634                                                 net_rsp);
1635                         break;
1636                 default:
1637                         {
1638                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1639                                         "Hit default case, not handled! Dropping the packet, opcode = %x.\n",
1640                                         net_rsp->opcode);
1641                         }
1642                 }
1643                 count++;
1644                 ql_update_cq(rx_ring);
1645                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1646                 if (count == budget)
1647                         break;
1648         }
1649         ql_update_buffer_queues(qdev, rx_ring);
1650         ql_write_cq_idx(rx_ring);
1651         return count;
1652 }
1653
1654 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1655 {
1656         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1657         struct ql_adapter *qdev = rx_ring->qdev;
1658         int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1659
1660         QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1661                 rx_ring->cq_id);
1662
1663         if (work_done < budget) {
1664                 __netif_rx_complete(napi);
1665                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1666         }
1667         return work_done;
1668 }
1669
1670 static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1671 {
1672         struct ql_adapter *qdev = netdev_priv(ndev);
1673
1674         qdev->vlgrp = grp;
1675         if (grp) {
1676                 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1677                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1678                            NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1679         } else {
1680                 QPRINTK(qdev, IFUP, DEBUG,
1681                         "Turning off VLAN in NIC_RCV_CFG.\n");
1682                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1683         }
1684 }
1685
1686 static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1687 {
1688         struct ql_adapter *qdev = netdev_priv(ndev);
1689         u32 enable_bit = MAC_ADDR_E;
1690
1691         spin_lock(&qdev->hw_lock);
1692         if (ql_set_mac_addr_reg
1693             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1694                 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1695         }
1696         spin_unlock(&qdev->hw_lock);
1697 }
1698
1699 static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1700 {
1701         struct ql_adapter *qdev = netdev_priv(ndev);
1702         u32 enable_bit = 0;
1703
1704         spin_lock(&qdev->hw_lock);
1705         if (ql_set_mac_addr_reg
1706             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1707                 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1708         }
1709         spin_unlock(&qdev->hw_lock);
1710
1711 }
1712
1713 /* Worker thread to process a given rx_ring that is dedicated
1714  * to outbound completions.
1715  */
1716 static void ql_tx_clean(struct work_struct *work)
1717 {
1718         struct rx_ring *rx_ring =
1719             container_of(work, struct rx_ring, rx_work.work);
1720         ql_clean_outbound_rx_ring(rx_ring);
1721         ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1722
1723 }
1724
1725 /* Worker thread to process a given rx_ring that is dedicated
1726  * to inbound completions.
1727  */
1728 static void ql_rx_clean(struct work_struct *work)
1729 {
1730         struct rx_ring *rx_ring =
1731             container_of(work, struct rx_ring, rx_work.work);
1732         ql_clean_inbound_rx_ring(rx_ring, 64);
1733         ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1734 }
1735
1736 /* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1737 static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1738 {
1739         struct rx_ring *rx_ring = dev_id;
1740         queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1741                               &rx_ring->rx_work, 0);
1742         return IRQ_HANDLED;
1743 }
1744
1745 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1746 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1747 {
1748         struct rx_ring *rx_ring = dev_id;
1749         netif_rx_schedule(&rx_ring->napi);
1750         return IRQ_HANDLED;
1751 }
1752
1753 /* This handles a fatal error, MPI activity, and the default
1754  * rx_ring in an MSI-X multiple vector environment.
1755  * In an MSI/Legacy environment it also processes the rest of
1756  * the rx_rings.
1757  */
1758 static irqreturn_t qlge_isr(int irq, void *dev_id)
1759 {
1760         struct rx_ring *rx_ring = dev_id;
1761         struct ql_adapter *qdev = rx_ring->qdev;
1762         struct intr_context *intr_context = &qdev->intr_context[0];
1763         u32 var;
1764         int i;
1765         int work_done = 0;
1766
1767         spin_lock(&qdev->hw_lock);
1768         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1769                 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1770                 spin_unlock(&qdev->hw_lock);
1771                 return IRQ_NONE;
1772         }
1773         spin_unlock(&qdev->hw_lock);
1774
1775         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
1776
1777         /*
1778          * Check for fatal error.
1779          */
1780         if (var & STS_FE) {
1781                 ql_queue_asic_error(qdev);
1782                 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1783                 var = ql_read32(qdev, ERR_STS);
1784                 QPRINTK(qdev, INTR, ERR,
1785                         "Resetting chip. Error Status Register = 0x%x\n", var);
1786                 return IRQ_HANDLED;
1787         }
1788
1789         /*
1790          * Check MPI processor activity.
1791          */
1792         if (var & STS_PI) {
1793                 /*
1794                  * We've got an async event or mailbox completion.
1795                  * Handle it and clear the source of the interrupt.
1796                  */
1797                 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
1798                 ql_disable_completion_interrupt(qdev, intr_context->intr);
1799                 queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
1800                                       &qdev->mpi_work, 0);
1801                 work_done++;
1802         }
1803
1804         /*
1805          * Check the default queue and wake handler if active.
1806          */
1807         rx_ring = &qdev->rx_ring[0];
1808         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
1809                 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
1810                 ql_disable_completion_interrupt(qdev, intr_context->intr);
1811                 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
1812                                       &rx_ring->rx_work, 0);
1813                 work_done++;
1814         }
1815
1816         if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
1817                 /*
1818                  * Start the DPC for each active queue.
1819                  */
1820                 for (i = 1; i < qdev->rx_ring_count; i++) {
1821                         rx_ring = &qdev->rx_ring[i];
1822                         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1823                             rx_ring->cnsmr_idx) {
1824                                 QPRINTK(qdev, INTR, INFO,
1825                                         "Waking handler for rx_ring[%d].\n", i);
1826                                 ql_disable_completion_interrupt(qdev,
1827                                                                 intr_context->
1828                                                                 intr);
1829                                 if (i < qdev->rss_ring_first_cq_id)
1830                                         queue_delayed_work_on(rx_ring->cpu,
1831                                                               qdev->q_workqueue,
1832                                                               &rx_ring->rx_work,
1833                                                               0);
1834                                 else
1835                                         netif_rx_schedule(&rx_ring->napi);
1836                                 work_done++;
1837                         }
1838                 }
1839         }
1840         ql_enable_completion_interrupt(qdev, intr_context->intr);
1841         return work_done ? IRQ_HANDLED : IRQ_NONE;
1842 }
1843
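/* Set up TSO (large send offload) for a GSO skb.
 * Returns 1 if the IOCB was set up for TSO, 0 if the skb does not
 * need segmentation, or a negative errno if expanding a cloned
 * header failed.
 */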
1844 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1845 {
1846
1847         if (skb_is_gso(skb)) {
1848                 int err;
1849                 if (skb_header_cloned(skb)) {
1850                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1851                         if (err)
1852                                 return err;
1853                 }
1854
1855                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1856                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
1857                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1858                 mac_iocb_ptr->total_hdrs_len =
1859                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
1860                 mac_iocb_ptr->net_trans_offset =
1861                     cpu_to_le16(skb_network_offset(skb) |
1862                                 skb_transport_offset(skb)
1863                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
1864                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1865                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
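                /* Seed the TCP checksum with the pseudo-header sum
                 * (length field zeroed) so the hardware can complete
                 * the checksum on each segment it generates.
                 */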
1866                 if (likely(skb->protocol == htons(ETH_P_IP))) {
1867                         struct iphdr *iph = ip_hdr(skb);
1868                         iph->check = 0;
1869                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1870                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1871                                                                  iph->daddr, 0,
1872                                                                  IPPROTO_TCP,
1873                                                                  0);
1874                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1875                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
1876                         tcp_hdr(skb)->check =
1877                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1878                                              &ipv6_hdr(skb)->daddr,
1879                                              0, IPPROTO_TCP, 0);
1880                 }
1881                 return 1;
1882         }
1883         return 0;
1884 }
1885
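/* Set up the IOCB for hardware TCP/UDP checksum.  This path handles
 * IPv4 only (note the unconditional OB_MAC_TSO_IOCB_IP4 below); the
 * checksum field is seeded with the pseudo-header sum for the chip
 * to complete.
 */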
1886 static void ql_hw_csum_setup(struct sk_buff *skb,
1887                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1888 {
1889         int len;
1890         struct iphdr *iph = ip_hdr(skb);
1891         __sum16 *check;
1892         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1893         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1894         mac_iocb_ptr->net_trans_offset =
1895                 cpu_to_le16(skb_network_offset(skb) |
1896                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
1897
1898         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1899         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
1900         if (likely(iph->protocol == IPPROTO_TCP)) {
1901                 check = &(tcp_hdr(skb)->check);
1902                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
1903                 mac_iocb_ptr->total_hdrs_len =
1904                     cpu_to_le16(skb_transport_offset(skb) +
1905                                 (tcp_hdr(skb)->doff << 2));
1906         } else {
1907                 check = &(udp_hdr(skb)->check);
1908                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
1909                 mac_iocb_ptr->total_hdrs_len =
1910                     cpu_to_le16(skb_transport_offset(skb) +
1911                                 sizeof(struct udphdr));
1912         }
1913         *check = ~csum_tcpudp_magic(iph->saddr,
1914                                     iph->daddr, len, iph->protocol, 0);
1915 }
1916
1917 static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1918 {
1919         struct tx_ring_desc *tx_ring_desc;
1920         struct ob_mac_iocb_req *mac_iocb_ptr;
1921         struct ql_adapter *qdev = netdev_priv(ndev);
1922         int tso;
1923         struct tx_ring *tx_ring;
1924         u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
1925
1926         tx_ring = &qdev->tx_ring[tx_ring_idx];
1927
1928         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1929                 QPRINTK(qdev, TX_QUEUED, INFO,
1930                         "%s: shutting down tx queue %d due to lack of resources.\n",
1931                         __func__, tx_ring_idx);
1932                 netif_stop_queue(ndev);
1933                 atomic_inc(&tx_ring->queue_stopped);
1934                 return NETDEV_TX_BUSY;
1935         }
1936         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
1937         mac_iocb_ptr = tx_ring_desc->queue_entry;
1938         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
1939         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
1940                 QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
1941                 return NETDEV_TX_BUSY;
1942         }
1943
1944         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
1945         mac_iocb_ptr->tid = tx_ring_desc->index;
1946         /* We use the upper 32-bits to store the tx queue for this IO.
1947          * When we get the completion we can use it to establish the context.
1948          */
1949         mac_iocb_ptr->txq_idx = tx_ring_idx;
1950         tx_ring_desc->skb = skb;
1951
1952         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
1953
1954         if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
1955                 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
1956                         vlan_tx_tag_get(skb));
1957                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
1958                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
1959         }
1960         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1961         if (tso < 0) {
1962                 dev_kfree_skb_any(skb);
1963                 return NETDEV_TX_OK;
1964         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
1965                 ql_hw_csum_setup(skb,
1966                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1967         }
1968         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
1969         tx_ring->prod_idx++;
1970         if (tx_ring->prod_idx == tx_ring->wq_len)
1971                 tx_ring->prod_idx = 0;
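        /* Make sure the IOCB writes are globally visible before the
         * doorbell write below hands the new producer index to the chip.
         */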
1972         wmb();
1973
1974         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
1975         ndev->trans_start = jiffies;
1976         QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
1977                 tx_ring->prod_idx, skb->len);
1978
1979         atomic_dec(&tx_ring->tx_count);
1980         return NETDEV_TX_OK;
1981 }
1982
1983 static void ql_free_shadow_space(struct ql_adapter *qdev)
1984 {
1985         if (qdev->rx_ring_shadow_reg_area) {
1986                 pci_free_consistent(qdev->pdev,
1987                                     PAGE_SIZE,
1988                                     qdev->rx_ring_shadow_reg_area,
1989                                     qdev->rx_ring_shadow_reg_dma);
1990                 qdev->rx_ring_shadow_reg_area = NULL;
1991         }
1992         if (qdev->tx_ring_shadow_reg_area) {
1993                 pci_free_consistent(qdev->pdev,
1994                                     PAGE_SIZE,
1995                                     qdev->tx_ring_shadow_reg_area,
1996                                     qdev->tx_ring_shadow_reg_dma);
1997                 qdev->tx_ring_shadow_reg_area = NULL;
1998         }
1999 }
2000
2001 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2002 {
2003         qdev->rx_ring_shadow_reg_area =
2004             pci_alloc_consistent(qdev->pdev,
2005                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2006         if (qdev->rx_ring_shadow_reg_area == NULL) {
2007                 QPRINTK(qdev, IFUP, ERR,
2008                         "Allocation of RX shadow space failed.\n");
2009                 return -ENOMEM;
2010         }
2011         qdev->tx_ring_shadow_reg_area =
2012             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2013                                  &qdev->tx_ring_shadow_reg_dma);
2014         if (qdev->tx_ring_shadow_reg_area == NULL) {
2015                 QPRINTK(qdev, IFUP, ERR,
2016                         "Allocation of TX shadow space failed.\n");
2017                 goto err_wqp_sh_area;
2018         }
2019         return 0;
2020
2021 err_wqp_sh_area:
2022         pci_free_consistent(qdev->pdev,
2023                             PAGE_SIZE,
2024                             qdev->rx_ring_shadow_reg_area,
2025                             qdev->rx_ring_shadow_reg_dma);
2026         return -ENOMEM;
2027 }
2028
2029 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2030 {
2031         struct tx_ring_desc *tx_ring_desc;
2032         int i;
2033         struct ob_mac_iocb_req *mac_iocb_ptr;
2034
2035         mac_iocb_ptr = tx_ring->wq_base;
2036         tx_ring_desc = tx_ring->q;
2037         for (i = 0; i < tx_ring->wq_len; i++) {
2038                 tx_ring_desc->index = i;
2039                 tx_ring_desc->skb = NULL;
2040                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2041                 mac_iocb_ptr++;
2042                 tx_ring_desc++;
2043         }
2044         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2045         atomic_set(&tx_ring->queue_stopped, 0);
2046 }
2047
2048 static void ql_free_tx_resources(struct ql_adapter *qdev,
2049                                  struct tx_ring *tx_ring)
2050 {
2051         if (tx_ring->wq_base) {
2052                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2053                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2054                 tx_ring->wq_base = NULL;
2055         }
2056         kfree(tx_ring->q);
2057         tx_ring->q = NULL;
2058 }
2059
2060 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2061                                  struct tx_ring *tx_ring)
2062 {
2063         tx_ring->wq_base =
2064             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2065                                  &tx_ring->wq_base_dma);
2066
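        /* The work queue base must be naturally aligned on its size;
         * the check below rejects an allocation that is not.
         */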
2067         if ((tx_ring->wq_base == NULL)
2068             || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
2069                 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2070                 return -ENOMEM;
2071         }
2072         tx_ring->q =
2073             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2074         if (tx_ring->q == NULL)
2075                 goto err;
2076
2077         return 0;
2078 err:
2079         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2080                             tx_ring->wq_base, tx_ring->wq_base_dma);
2081         return -ENOMEM;
2082 }
2083
2084 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2085 {
2086         int i;
2087         struct bq_desc *lbq_desc;
2088
2089         for (i = 0; i < rx_ring->lbq_len; i++) {
2090                 lbq_desc = &rx_ring->lbq[i];
2091                 if (lbq_desc->p.lbq_page) {
2092                         pci_unmap_page(qdev->pdev,
2093                                        pci_unmap_addr(lbq_desc, mapaddr),
2094                                        pci_unmap_len(lbq_desc, maplen),
2095                                        PCI_DMA_FROMDEVICE);
2096
2097                         put_page(lbq_desc->p.lbq_page);
2098                         lbq_desc->p.lbq_page = NULL;
2099                 }
2100         }
2101 }
2102
2103 /*
2104  * Allocate and map a page for each element of the lbq.
2105  */
2106 static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2107                                 struct rx_ring *rx_ring)
2108 {
2109         int i;
2110         struct bq_desc *lbq_desc;
2111         u64 map;
2112         __le64 *bq = rx_ring->lbq_base;
2113
2114         for (i = 0; i < rx_ring->lbq_len; i++) {
2115                 lbq_desc = &rx_ring->lbq[i];
2116                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2117                 lbq_desc->addr = bq;
2118                 lbq_desc->index = i;
2119                 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2120                 if (unlikely(!lbq_desc->p.lbq_page)) {
2121                         QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2122                         goto mem_error;
2123                 } else {
2124                         map = pci_map_page(qdev->pdev,
2125                                            lbq_desc->p.lbq_page,
2126                                            0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2127                         if (pci_dma_mapping_error(qdev->pdev, map)) {
2128                                 QPRINTK(qdev, IFUP, ERR,
2129                                         "PCI mapping failed.\n");
2130                                 goto mem_error;
2131                         }
2132                         pci_unmap_addr_set(lbq_desc, mapaddr, map);
2133                         pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2134                         *lbq_desc->addr = cpu_to_le64(map);
2135                 }
2136                 bq++;
2137         }
2138         return 0;
2139 mem_error:
2140         ql_free_lbq_buffers(qdev, rx_ring);
2141         return -ENOMEM;
2142 }
2143
2144 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2145 {
2146         int i;
2147         struct bq_desc *sbq_desc;
2148
2149         for (i = 0; i < rx_ring->sbq_len; i++) {
2150                 sbq_desc = &rx_ring->sbq[i];
2151                 if (sbq_desc == NULL) {
2152                         QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2153                         return;
2154                 }
2155                 if (sbq_desc->p.skb) {
2156                         pci_unmap_single(qdev->pdev,
2157                                          pci_unmap_addr(sbq_desc, mapaddr),
2158                                          pci_unmap_len(sbq_desc, maplen),
2159                                          PCI_DMA_FROMDEVICE);
2160                         dev_kfree_skb(sbq_desc->p.skb);
2161                         sbq_desc->p.skb = NULL;
2162                 }
2163         }
2164 }
2165
2166 /* Allocate and map an skb for each element of the sbq. */
2167 static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
2168                                 struct rx_ring *rx_ring)
2169 {
2170         int i;
2171         struct bq_desc *sbq_desc;
2172         struct sk_buff *skb;
2173         u64 map;
2174         __le64 *bq = rx_ring->sbq_base;
2175
2176         for (i = 0; i < rx_ring->sbq_len; i++) {
2177                 sbq_desc = &rx_ring->sbq[i];
2178                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2179                 sbq_desc->index = i;
2180                 sbq_desc->addr = bq;
2181                 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2182                 if (unlikely(!skb)) {
2183                         /* Better luck next round */
2184                         QPRINTK(qdev, IFUP, ERR,
2185                                 "small buff alloc failed for %d bytes at index %d.\n",
2186                                 rx_ring->sbq_buf_size, i);
2187                         goto mem_err;
2188                 }
2189                 skb_reserve(skb, QLGE_SB_PAD);
2190                 sbq_desc->p.skb = skb;
2191                 /*
2192                  * Map only half the buffer, because the
2193                  * other half may get some data copied to it
2194                  * when the completion arrives.
2195                  */
2196                 map = pci_map_single(qdev->pdev,
2197                                      skb->data,
2198                                      rx_ring->sbq_buf_size / 2,
2199                                      PCI_DMA_FROMDEVICE);
2200                 if (pci_dma_mapping_error(qdev->pdev, map)) {
2201                         QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2202                         goto mem_err;
2203                 }
2204                 pci_unmap_addr_set(sbq_desc, mapaddr, map);
2205                 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
2206                 *sbq_desc->addr = cpu_to_le64(map);
2207                 bq++;
2208         }
2209         return 0;
2210 mem_err:
2211         ql_free_sbq_buffers(qdev, rx_ring);
2212         return -ENOMEM;
2213 }
2214
2215 static void ql_free_rx_resources(struct ql_adapter *qdev,
2216                                  struct rx_ring *rx_ring)
2217 {
2218         if (rx_ring->sbq_len)
2219                 ql_free_sbq_buffers(qdev, rx_ring);
2220         if (rx_ring->lbq_len)
2221                 ql_free_lbq_buffers(qdev, rx_ring);
2222
2223         /* Free the small buffer queue. */
2224         if (rx_ring->sbq_base) {
2225                 pci_free_consistent(qdev->pdev,
2226                                     rx_ring->sbq_size,
2227                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2228                 rx_ring->sbq_base = NULL;
2229         }
2230
2231         /* Free the small buffer queue control blocks. */
2232         kfree(rx_ring->sbq);
2233         rx_ring->sbq = NULL;
2234
2235         /* Free the large buffer queue. */
2236         if (rx_ring->lbq_base) {
2237                 pci_free_consistent(qdev->pdev,
2238                                     rx_ring->lbq_size,
2239                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2240                 rx_ring->lbq_base = NULL;
2241         }
2242
2243         /* Free the large buffer queue control blocks. */
2244         kfree(rx_ring->lbq);
2245         rx_ring->lbq = NULL;
2246
2247         /* Free the rx queue. */
2248         if (rx_ring->cq_base) {
2249                 pci_free_consistent(qdev->pdev,
2250                                     rx_ring->cq_size,
2251                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2252                 rx_ring->cq_base = NULL;
2253         }
2254 }
2255
2256 /* Allocate queues and buffers for this completion queue based
2257  * on the values in the parameter structure. */
2258 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2259                                  struct rx_ring *rx_ring)
2260 {
2261
2262         /*
2263          * Allocate the completion queue for this rx_ring.
2264          */
2265         rx_ring->cq_base =
2266             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2267                                  &rx_ring->cq_base_dma);
2268
2269         if (rx_ring->cq_base == NULL) {
2270                 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2271                 return -ENOMEM;
2272         }
2273
2274         if (rx_ring->sbq_len) {
2275                 /*
2276                  * Allocate small buffer queue.
2277                  */
2278                 rx_ring->sbq_base =
2279                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2280                                          &rx_ring->sbq_base_dma);
2281
2282                 if (rx_ring->sbq_base == NULL) {
2283                         QPRINTK(qdev, IFUP, ERR,
2284                                 "Small buffer queue allocation failed.\n");
2285                         goto err_mem;
2286                 }
2287
2288                 /*
2289                  * Allocate small buffer queue control blocks.
2290                  */
2291                 rx_ring->sbq =
2292                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2293                             GFP_KERNEL);
2294                 if (rx_ring->sbq == NULL) {
2295                         QPRINTK(qdev, IFUP, ERR,
2296                                 "Small buffer queue control block allocation failed.\n");
2297                         goto err_mem;
2298                 }
2299
2300                 if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
2301                         QPRINTK(qdev, IFUP, ERR,
2302                                 "Small buffer allocation failed.\n");
2303                         goto err_mem;
2304                 }
2305         }
2306
2307         if (rx_ring->lbq_len) {
2308                 /*
2309                  * Allocate large buffer queue.
2310                  */
2311                 rx_ring->lbq_base =
2312                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2313                                          &rx_ring->lbq_base_dma);
2314
2315                 if (rx_ring->lbq_base == NULL) {
2316                         QPRINTK(qdev, IFUP, ERR,
2317                                 "Large buffer queue allocation failed.\n");
2318                         goto err_mem;
2319                 }
2320                 /*
2321                  * Allocate large buffer queue control blocks.
2322                  */
2323                 rx_ring->lbq =
2324                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2325                             GFP_KERNEL);
2326                 if (rx_ring->lbq == NULL) {
2327                         QPRINTK(qdev, IFUP, ERR,
2328                                 "Large buffer queue control block allocation failed.\n");
2329                         goto err_mem;
2330                 }
2331
2332                 /*
2333                  * Allocate the buffers.
2334                  */
2335                 if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
2336                         QPRINTK(qdev, IFUP, ERR,
2337                                 "Large buffer allocation failed.\n");
2338                         goto err_mem;
2339                 }
2340         }
2341
2342         return 0;
2343
2344 err_mem:
2345         ql_free_rx_resources(qdev, rx_ring);
2346         return -ENOMEM;
2347 }
2348
2349 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2350 {
2351         struct tx_ring *tx_ring;
2352         struct tx_ring_desc *tx_ring_desc;
2353         int i, j;
2354
2355         /*
2356          * Loop through all queues and free
2357          * any resources.
2358          */
2359         for (j = 0; j < qdev->tx_ring_count; j++) {
2360                 tx_ring = &qdev->tx_ring[j];
2361                 for (i = 0; i < tx_ring->wq_len; i++) {
2362                         tx_ring_desc = &tx_ring->q[i];
2363                         if (tx_ring_desc && tx_ring_desc->skb) {
2364                                 QPRINTK(qdev, IFDOWN, ERR,
2365                                 "Freeing lost SKB %p, from queue %d, index %d.\n",
2366                                         tx_ring_desc->skb, j,
2367                                         tx_ring_desc->index);
2368                                 ql_unmap_send(qdev, tx_ring_desc,
2369                                               tx_ring_desc->map_cnt);
2370                                 dev_kfree_skb(tx_ring_desc->skb);
2371                                 tx_ring_desc->skb = NULL;
2372                         }
2373                 }
2374         }
2375 }
2376
2377 static void ql_free_mem_resources(struct ql_adapter *qdev)
2378 {
2379         int i;
2380
2381         for (i = 0; i < qdev->tx_ring_count; i++)
2382                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2383         for (i = 0; i < qdev->rx_ring_count; i++)
2384                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2385         ql_free_shadow_space(qdev);
2386 }
2387
2388 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2389 {
2390         int i;
2391
2392         /* Allocate space for our shadow registers and such. */
2393         if (ql_alloc_shadow_space(qdev))
2394                 return -ENOMEM;
2395
2396         for (i = 0; i < qdev->rx_ring_count; i++) {
2397                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2398                         QPRINTK(qdev, IFUP, ERR,
2399                                 "RX resource allocation failed.\n");
2400                         goto err_mem;
2401                 }
2402         }
2403         /* Allocate tx queue resources */
2404         for (i = 0; i < qdev->tx_ring_count; i++) {
2405                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2406                         QPRINTK(qdev, IFUP, ERR,
2407                                 "TX resource allocation failed.\n");
2408                         goto err_mem;
2409                 }
2410         }
2411         return 0;
2412
2413 err_mem:
2414         ql_free_mem_resources(qdev);
2415         return -ENOMEM;
2416 }
2417
2418 /* Set up the rx ring control block and pass it to the chip.
2419  * The control block is defined as
2420  * "Completion Queue Initialization Control Block", or cqicb.
2421  */
2422 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2423 {
2424         struct cqicb *cqicb = &rx_ring->cqicb;
2425         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2426             (rx_ring->cq_id * sizeof(u64) * 4);
2427         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2428             (rx_ring->cq_id * sizeof(u64) * 4);
2429         void __iomem *doorbell_area =
2430             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2431         int err = 0;
2432         u16 bq_len;
2433
2434         /* Set up the shadow registers for this ring. */
2435         rx_ring->prod_idx_sh_reg = shadow_reg;
2436         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2437         shadow_reg += sizeof(u64);
2438         shadow_reg_dma += sizeof(u64);
2439         rx_ring->lbq_base_indirect = shadow_reg;
2440         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2441         shadow_reg += sizeof(u64);
2442         shadow_reg_dma += sizeof(u64);
2443         rx_ring->sbq_base_indirect = shadow_reg;
2444         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
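        /* Per-CQ shadow area layout, four u64 slots each (see the
         * cq_id offset math above); the fourth slot appears unused:
         *   [0] completion producer index, DMA'd here by the chip
         *   [1] lbq base address indirect entry
         *   [2] sbq base address indirect entry
         */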
2445
2446         /* PCI doorbell mem area + 0x00 for consumer index register */
2447         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
2448         rx_ring->cnsmr_idx = 0;
2449         rx_ring->curr_entry = rx_ring->cq_base;
2450
2451         /* PCI doorbell mem area + 0x04 for valid register */
2452         rx_ring->valid_db_reg = doorbell_area + 0x04;
2453
2454         /* PCI doorbell mem area + 0x18 for large buffer consumer */
2455         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
2456
2457         /* PCI doorbell mem area + 0x1c */
2458         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
2459
2460         memset((void *)cqicb, 0, sizeof(struct cqicb));
2461         cqicb->msix_vect = rx_ring->irq;
2462
2463         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2464         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
2465
2466         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
2467
2468         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
2469
2470         /*
2471          * Set up the control block load flags.
2472          */
2473         cqicb->flags = FLAGS_LC |       /* Load queue base address */
2474             FLAGS_LV |          /* Load MSI-X vector */
2475             FLAGS_LI;           /* Load irq delay values */
2476         if (rx_ring->lbq_len) {
2477                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
2478                 *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
2479                 cqicb->lbq_addr =
2480                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
2481                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2482                         (u16) rx_ring->lbq_buf_size;
2483                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2484                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2485                         (u16) rx_ring->lbq_len;
2486                 cqicb->lbq_len = cpu_to_le16(bq_len);
2487                 rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
2488                 rx_ring->lbq_curr_idx = 0;
2489                 rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
2490                 rx_ring->lbq_free_cnt = 16;
2491         }
2492         if (rx_ring->sbq_len) {
2493                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
2494                 *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
2495                 cqicb->sbq_addr =
2496                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2497                 cqicb->sbq_buf_size =
2498                     cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
2499                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2500                         (u16) rx_ring->sbq_len;
2501                 cqicb->sbq_len = cpu_to_le16(bq_len);
2502                 rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
2503                 rx_ring->sbq_curr_idx = 0;
2504                 rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
2505                 rx_ring->sbq_free_cnt = 16;
2506         }
2507         switch (rx_ring->type) {
2508         case TX_Q:
2509                 /* If there's only one interrupt, then we use
2510                  * worker threads to process the outbound
2511                  * completion handling rx_rings. We do this so
2512                  * they can be run on multiple CPUs. There is
2513                  * room to play with this more where we would only
2514                  * run in a worker if there are more than x number
2515                  * of outbound completions on the queue and more
2516                  * than one queue active.  Some threshold that
2517                  * would indicate a benefit in spite of the cost
2518                  * of a context switch.
2519                  * If there's more than one interrupt, then the
2520                  * outbound completions are processed in the ISR.
2521                  */
2522                 /* Use the worker in the MSI-X case as well; with
2523                  * all debug warnings on we see a WARN_ON message
2524                  * when we free the skb in the interrupt context.
2525                  */
2526                 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2530                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2531                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2532                 break;
2533         case DEFAULT_Q:
2534                 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
2535                 cqicb->irq_delay = 0;
2536                 cqicb->pkt_delay = 0;
2537                 break;
2538         case RX_Q:
2539                 /* Inbound completion handling rx_rings run in
2540                  * separate NAPI contexts.
2541                  */
2542                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2543                                64);
2544                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2545                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2546                 break;
2547         default:
2548                 QPRINTK(qdev, IFUP, ERR, "Invalid rx_ring->type = %d.\n",
2549                         rx_ring->type);
2550         }
2551         QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
2552         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2553                            CFG_LCQ, rx_ring->cq_id);
2554         if (err) {
2555                 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2556                 return err;
2557         }
2558         QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
2559         /*
2560          * Advance the producer index for the buffer queues.
2561          */
2562         wmb();
2563         if (rx_ring->lbq_len)
2564                 ql_write_db_reg(rx_ring->lbq_prod_idx,
2565                                 rx_ring->lbq_prod_idx_db_reg);
2566         if (rx_ring->sbq_len)
2567                 ql_write_db_reg(rx_ring->sbq_prod_idx,
2568                                 rx_ring->sbq_prod_idx_db_reg);
2569         return err;
2570 }
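
/* Illustrative sketch, not part of the driver: the bq_len assignments
 * above encode a queue length of 65536 as 0 because the CQICB length
 * fields are only 16 bits wide.  The explicit ternary documents the
 * intent, although (u16) 65536 would truncate to 0 anyway.  A
 * hypothetical helper for the encoding could read:
 */
static inline __le16 ql_encode_bq_len_sketch(u32 len)
{
        /* 65536 wraps to 0 in 16 bits, which is what the chip expects. */
        return cpu_to_le16((len == 65536) ? 0 : (u16) len);
}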
2571
2572 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2573 {
2574         struct wqicb *wqicb = (struct wqicb *)tx_ring;
2575         void __iomem *doorbell_area =
2576             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2577         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2578             (tx_ring->wq_id * sizeof(u64));
2579         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2580             (tx_ring->wq_id * sizeof(u64));
2581         int err = 0;
2582
2583         /*
2584          * Assign doorbell registers for this tx_ring.
2585          */
2586         /* TX PCI doorbell mem area for tx producer index */
2587         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
2588         tx_ring->prod_idx = 0;
2589         /* TX PCI doorbell mem area + 0x04 */
2590         tx_ring->valid_db_reg = doorbell_area + 0x04;
2591
2592         /*
2593          * Assign shadow registers for this tx_ring.
2594          */
2595         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2596         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2597
2598         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2599         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2600                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2601         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2602         wqicb->rid = 0;
2603         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
2604
2605         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
2606
2607         ql_init_tx_ring(qdev, tx_ring);
2608
2609         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2610                            (u16) tx_ring->wq_id);
2611         if (err) {
2612                 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2613                 return err;
2614         }
2615         QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
2616         return err;
2617 }
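
/* Illustrative sketch, not part of the driver: ql_start_tx_ring()
 * derives two per-queue addresses purely from wq_id -- one doorbell
 * page and one 64-bit shadow slot.  Hypothetical helpers making the
 * mapping explicit:
 */
static inline void __iomem *ql_wq_doorbell_sketch(struct ql_adapter *qdev,
                                                  u16 wq_id)
{
        /* Each work queue owns a full doorbell page. */
        return qdev->doorbell_area + (DB_PAGE_SIZE * wq_id);
}

static inline void *ql_wq_shadow_sketch(struct ql_adapter *qdev, u16 wq_id)
{
        /* Each work queue owns one u64 in the shadow register area. */
        return qdev->tx_ring_shadow_reg_area + (wq_id * sizeof(u64));
}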
2618
2619 static void ql_disable_msix(struct ql_adapter *qdev)
2620 {
2621         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2622                 pci_disable_msix(qdev->pdev);
2623                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2624                 kfree(qdev->msi_x_entry);
2625                 qdev->msi_x_entry = NULL;
2626         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2627                 pci_disable_msi(qdev->pdev);
2628                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2629         }
2630 }
2631
2632 static void ql_enable_msix(struct ql_adapter *qdev)
2633 {
2634         int i;
2635
2636         qdev->intr_count = 1;
2637         /* Get the MSIX vectors. */
2638         if (irq_type == MSIX_IRQ) {
2639                 /* Try to alloc space for the msix struct,
2640                  * if it fails then go to MSI/legacy.
2641                  */
2642                 qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
2643                                             sizeof(struct msix_entry),
2644                                             GFP_KERNEL);
2645                 if (!qdev->msi_x_entry) {
2646                         irq_type = MSI_IRQ;
2647                         goto msi;
2648                 }
2649
2650                 for (i = 0; i < qdev->rx_ring_count; i++)
2651                         qdev->msi_x_entry[i].entry = i;
2652
2653                 if (!pci_enable_msix
2654                     (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2655                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
2656                         qdev->intr_count = qdev->rx_ring_count;
2657                         QPRINTK(qdev, IFUP, INFO,
2658                                 "MSI-X Enabled, got %d vectors.\n",
2659                                 qdev->intr_count);
2660                         return;
2661                 } else {
2662                         kfree(qdev->msi_x_entry);
2663                         qdev->msi_x_entry = NULL;
2664                         QPRINTK(qdev, IFUP, WARNING,
2665                                 "MSI-X Enable failed, trying MSI.\n");
2666                         irq_type = MSI_IRQ;
2667                 }
2668         }
2669 msi:
2670         if (irq_type == MSI_IRQ) {
2671                 if (!pci_enable_msi(qdev->pdev)) {
2672                         set_bit(QL_MSI_ENABLED, &qdev->flags);
2673                         QPRINTK(qdev, IFUP, INFO,
2674                                 "Running with MSI interrupts.\n");
2675                         return;
2676                 }
2677         }
2678         irq_type = LEG_IRQ;
2679         QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2680 }
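
/* Note on the fallback ladder above, schematically:
 *
 *      MSI-X requested -> one vector per rx_ring on success,
 *                         else fall back to MSI
 *      MSI             -> a single vector on success,
 *                         else fall back to legacy INTx
 *
 * The fallback is recorded by rewriting the module-wide irq_type,
 * so a failure on one adapter lowers the starting point for any
 * adapter probed after it.
 */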
2681
2682 /*
2683  * Here we build the intr_context structures based on
2684  * our rx_ring count and intr vector count.
2685  * The intr_context structure is used to hook each vector
2686  * to possibly different handlers.
2687  */
2688 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2689 {
2690         int i = 0;
2691         struct intr_context *intr_context = &qdev->intr_context[0];
2692
2693         ql_enable_msix(qdev);
2694
2695         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2696                 /* Each rx_ring has its own intr_context since
2697                  * we have a separate vector for each queue.
2698                  * This is only true when MSI-X is enabled.
2699                  */
2701                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2702                         qdev->rx_ring[i].irq = i;
2703                         intr_context->intr = i;
2704                         intr_context->qdev = qdev;
2705                         /*
2706                          * We set up each vector's enable/disable/read bits
2707                          * so there are no bit/mask calculations in the critical path.
2708                          */
2709                         intr_context->intr_en_mask =
2710                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2711                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2712                             | i;
2713                         intr_context->intr_dis_mask =
2714                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2715                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2716                             INTR_EN_IHD | i;
2717                         intr_context->intr_read_mask =
2718                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2719                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2720                             i;
2721
2722                         if (i == 0) {
2723                                 /*
2724                                  * Default queue handles bcast/mcast plus
2725                                  * async events.  Needs buffers.
2726                                  */
2727                                 intr_context->handler = qlge_isr;
2728                                 sprintf(intr_context->name, "%s-default-queue",
2729                                         qdev->ndev->name);
2730                         } else if (i < qdev->rss_ring_first_cq_id) {
2731                                 /*
2732                                  * Outbound queue is for outbound completions only.
2733                                  */
2734                                 intr_context->handler = qlge_msix_tx_isr;
2735                                 sprintf(intr_context->name, "%s-tx-%d",
2736                                         qdev->ndev->name, i);
2737                         } else {
2738                                 /*
2739                                  * Inbound queues handle unicast frames only.
2740                                  */
2741                                 intr_context->handler = qlge_msix_rx_isr;
2742                                 sprintf(intr_context->name, "%s-rx-%d",
2743                                         qdev->ndev->name, i);
2744                         }
2745                 }
2746         } else {
2747                 /*
2748                  * All rx_rings use the same intr_context since
2749                  * there is only one vector.
2750                  */
2751                 intr_context->intr = 0;
2752                 intr_context->qdev = qdev;
2753                 /*
2754                  * We set up each vector's enable/disable/read bits
2755                  * so there are no bit/mask calculations in the critical path.
2756                  */
2757                 intr_context->intr_en_mask =
2758                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2759                 intr_context->intr_dis_mask =
2760                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2761                     INTR_EN_TYPE_DISABLE;
2762                 intr_context->intr_read_mask =
2763                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2764                 /*
2765                  * Single interrupt means one handler for all rings.
2766                  */
2767                 intr_context->handler = qlge_isr;
2768                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2769                 for (i = 0; i < qdev->rx_ring_count; i++)
2770                         qdev->rx_ring[i].irq = 0;
2771         }
2772 }
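
/* Under MSI-X the loop above builds the following vector-to-handler
 * map (N == qdev->intr_count):
 *
 *      vector 0                              -> qlge_isr (default queue)
 *      vectors 1 .. rss_ring_first_cq_id - 1 -> qlge_msix_tx_isr (outbound)
 *      vectors rss_ring_first_cq_id .. N - 1 -> qlge_msix_rx_isr (inbound/RSS)
 *
 * Without MSI-X every rx_ring shares vector 0 and qlge_isr demuxes
 * the completions itself.
 */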
2773
2774 static void ql_free_irq(struct ql_adapter *qdev)
2775 {
2776         int i;
2777         struct intr_context *intr_context = &qdev->intr_context[0];
2778
2779         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2780                 if (intr_context->hooked) {
2781                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2782                                 free_irq(qdev->msi_x_entry[i].vector,
2783                                          &qdev->rx_ring[i]);
2784                                 QPRINTK(qdev, IFDOWN, DEBUG,
2785                                         "freeing msix interrupt %d.\n", i);
2786                         } else {
2787                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2788                                 QPRINTK(qdev, IFDOWN, DEBUG,
2789                                         "freeing msi interrupt %d.\n", i);
2790                         }
2791                 }
2792         }
2793         ql_disable_msix(qdev);
2794 }
2795
2796 static int ql_request_irq(struct ql_adapter *qdev)
2797 {
2798         int i;
2799         int status = 0;
2800         struct pci_dev *pdev = qdev->pdev;
2801         struct intr_context *intr_context = &qdev->intr_context[0];
2802
2803         ql_resolve_queues_to_irqs(qdev);
2804
2805         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2806                 atomic_set(&intr_context->irq_cnt, 0);
2807                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2808                         status = request_irq(qdev->msi_x_entry[i].vector,
2809                                              intr_context->handler,
2810                                              0,
2811                                              intr_context->name,
2812                                              &qdev->rx_ring[i]);
2813                         if (status) {
2814                                 QPRINTK(qdev, IFUP, ERR,
2815                                         "Failed request for MSIX interrupt %d.\n",
2816                                         i);
2817                                 goto err_irq;
2818                         } else {
2819                                 QPRINTK(qdev, IFUP, INFO,
2820                                         "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2821                                         i,
2822                                         qdev->rx_ring[i].type ==
2823                                         DEFAULT_Q ? "DEFAULT_Q" : "",
2824                                         qdev->rx_ring[i].type ==
2825                                         TX_Q ? "TX_Q" : "",
2826                                         qdev->rx_ring[i].type ==
2827                                         RX_Q ? "RX_Q" : "", intr_context->name);
2828                         }
2829                 } else {
2830                         QPRINTK(qdev, IFUP, DEBUG,
2831                                 "trying msi or legacy interrupts.\n");
2832                         QPRINTK(qdev, IFUP, DEBUG,
2833                                 "%s: irq = %d.\n", __func__, pdev->irq);
2834                         QPRINTK(qdev, IFUP, DEBUG,
2835                                 "%s: context->name = %s.\n", __func__,
2836                                intr_context->name);
2837                         QPRINTK(qdev, IFUP, DEBUG,
2838                                 "%s: dev_id = 0x%p.\n", __func__,
2839                                &qdev->rx_ring[0]);
2840                         status =
2841                             request_irq(pdev->irq, qlge_isr,
2842                                         test_bit(QL_MSI_ENABLED,
2843                                                  &qdev->
2844                                                  flags) ? 0 : IRQF_SHARED,
2845                                         intr_context->name, &qdev->rx_ring[0]);
2846                         if (status)
2847                                 goto err_irq;
2848
2849                         QPRINTK(qdev, IFUP, INFO,
2850                                 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2851                                 i,
2852                                 qdev->rx_ring[0].type ==
2853                                 DEFAULT_Q ? "DEFAULT_Q" : "",
2854                                 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
2855                                 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
2856                                 intr_context->name);
2857                 }
2858                 intr_context->hooked = 1;
2859         }
2860         return status;
2861 err_irq:
2862         QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!\n");
2863         ql_free_irq(qdev);
2864         return status;
2865 }
2866
2867 static int ql_start_rss(struct ql_adapter *qdev)
2868 {
2869         struct ricb *ricb = &qdev->ricb;
2870         int status = 0;
2871         int i;
2872         u8 *hash_id = (u8 *) ricb->hash_cq_id;
2873
2874         memset((void *)ricb, 0, sizeof(*ricb));
2875
2876         ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
2877         ricb->flags =
2878             (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
2879              RSS_RT6);
2880         ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
2881
2882         /*
2883          * Fill out the Indirection Table.
2884          */
2885         for (i = 0; i < 32; i++)
2886                 hash_id[i] = i & 1;
2887
2888         /*
2889          * Random values for the IPv6 and IPv4 Hash Keys.
2890          */
2891         get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
2892         get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
2893
2894         QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
2895
2896         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
2897         if (status) {
2898                 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
2899                 return status;
2900         }
2901         QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
2902         return status;
2903 }
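
/* The indirection fill above (hash_id[i] = i & 1) alternates the 32
 * table entries it writes between the first two inbound completion
 * queues only.  A generalization for any power-of-two rss_ring_count
 * (an assumption, not current driver behavior) would be
 * hash_id[i] = i & (qdev->rss_ring_count - 1), matching the mask
 * programmed into ricb->mask above.
 */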
2904
2905 /* Initialize the frame-to-queue routing. */
2906 static int ql_route_initialize(struct ql_adapter *qdev)
2907 {
2908         int status = 0;
2909         int i;
2910
2911         /* Clear all the entries in the routing table. */
2912         for (i = 0; i < 16; i++) {
2913                 status = ql_set_routing_reg(qdev, i, 0, 0);
2914                 if (status) {
2915                         QPRINTK(qdev, IFUP, ERR,
2916                                 "Failed to clear routing register %d.\n", i);
2917                         return status;
2918                 }
2919         }
2920
2921         status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
2922         if (status) {
2923                 QPRINTK(qdev, IFUP, ERR,
2924                         "Failed to init routing register for error packets.\n");
2925                 return status;
2926         }
2927         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
2928         if (status) {
2929                 QPRINTK(qdev, IFUP, ERR,
2930                         "Failed to init routing register for broadcast packets.\n");
2931                 return status;
2932         }
2933         /* If we have more than one inbound queue, then turn on RSS in the
2934          * routing block.
2935          */
2936         if (qdev->rss_ring_count > 1) {
2937                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
2938                                         RT_IDX_RSS_MATCH, 1);
2939                 if (status) {
2940                         QPRINTK(qdev, IFUP, ERR,
2941                                 "Failed to init routing register for MATCH RSS packets.\n");
2942                         return status;
2943                 }
2944         }
2945
2946         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
2947                                     RT_IDX_CAM_HIT, 1);
2948         if (status) {
2949                 QPRINTK(qdev, IFUP, ERR,
2950                         "Failed to init routing register for CAM packets.\n");
2951                 return status;
2952         }
2953         return status;
2954 }
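
/* After ql_route_initialize() the routing table is, in effect:
 *
 *      RT_IDX_ALL_ERR_SLOT   -> error frames
 *      RT_IDX_BCAST_SLOT     -> broadcast frames
 *      RT_IDX_RSS_MATCH_SLOT -> RSS-hashed unicast (multi-queue only)
 *      RT_IDX_CAM_HIT_SLOT   -> CAM (MAC address) hits
 *
 * with every other slot cleared by the loop at the top of the
 * function.
 */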
2955
2956 static int ql_adapter_initialize(struct ql_adapter *qdev)
2957 {
2958         u32 value, mask;
2959         int i;
2960         int status = 0;
2961
2962         /*
2963          * Set up the System register to halt on errors.
2964          */
2965         value = SYS_EFE | SYS_FAE;
2966         mask = value << 16;
2967         ql_write32(qdev, SYS, mask | value);
2968
2969         /* Set the default queue. */
2970         value = NIC_RCV_CFG_DFQ;
2971         mask = NIC_RCV_CFG_DFQ_MASK;
2972         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
2973
2974         /* Set the MPI interrupt to enabled. */
2975         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
2976
2977         /* Enable the function, set pagesize, enable error checking. */
2978         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
2979             FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
2980
2981         /* Set/clear header splitting. */
2982         mask = FSC_VM_PAGESIZE_MASK |
2983             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
2984         ql_write32(qdev, FSC, mask | value);
2985
2986         ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
2987                 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
2988
2989         /* Start up the rx queues. */
2990         for (i = 0; i < qdev->rx_ring_count; i++) {
2991                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
2992                 if (status) {
2993                         QPRINTK(qdev, IFUP, ERR,
2994                                 "Failed to start rx ring[%d].\n", i);
2995                         return status;
2996                 }
2997         }
2998
2999         /* If there is more than one inbound completion queue
3000          * then download a RICB to configure RSS.
3001          */
3002         if (qdev->rss_ring_count > 1) {
3003                 status = ql_start_rss(qdev);
3004                 if (status) {
3005                         QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3006                         return status;
3007                 }
3008         }
3009
3010         /* Start up the tx queues. */
3011         for (i = 0; i < qdev->tx_ring_count; i++) {
3012                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3013                 if (status) {
3014                         QPRINTK(qdev, IFUP, ERR,
3015                                 "Failed to start tx ring[%d].\n", i);
3016                         return status;
3017                 }
3018         }
3019
3020         status = ql_port_initialize(qdev);
3021         if (status) {
3022                 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3023                 return status;
3024         }
3025
3026         status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
3027                                      MAC_ADDR_TYPE_CAM_MAC, qdev->func);
3028         if (status) {
3029                 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3030                 return status;
3031         }
3032
3033         status = ql_route_initialize(qdev);
3034         if (status) {
3035                 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3036                 return status;
3037         }
3038
3039         /* Start NAPI for the RSS queues. */
3040         for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
3041                 QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
3042                         i);
3043                 napi_enable(&qdev->rx_ring[i].napi);
3044         }
3045
3046         return status;
3047 }
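
/* ql_adapter_initialize() brings the chip up in a fixed order:
 * global registers, every completion queue (CQICB), the optional RSS
 * block (RICB), every work queue (WQICB), port/MAC setup, routing,
 * and finally NAPI enable.  Each step bails out on error, so a
 * failure leaves the earlier blocks loaded; ql_adapter_up() cleans
 * that up with a full ql_adapter_reset() on its error path.
 */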
3048
3049 /* Issue soft reset to chip. */
3050 static int ql_adapter_reset(struct ql_adapter *qdev)
3051 {
3052         u32 value;
3053         int max_wait_time;
3054         int status = 0;
3055         int resetCnt = 0;
3056
3057 #define MAX_RESET_CNT   1
3058 issueReset:
3059         resetCnt++;
3060         QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
3061         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3062         /* Wait for reset to complete. */
3063         max_wait_time = 3;
3064         QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
3065                 max_wait_time);
3066         do {
3067                 value = ql_read32(qdev, RST_FO);
3068                 if ((value & RST_FO_FR) == 0)
3069                         break;
3070
3071                 ssleep(1);
3072         } while (--max_wait_time);
3073         if (value & RST_FO_FR) {
3074                 QPRINTK(qdev, IFDOWN, ERR,
3075                         "Stuck in SoftReset:  FSC_SR:0x%08x\n", value);
3076                 if (resetCnt < MAX_RESET_CNT)
3077                         goto issueReset;
3078         }
3079         if (max_wait_time == 0) {
3080                 status = -ETIMEDOUT;
3081                 QPRINTK(qdev, IFDOWN, ERR,
3082                         "Timed out waiting for the chip to reset!\n");
3083         }
3084
3085         return status;
3086 }
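
/* Illustrative sketch, not part of the driver: ql_adapter_reset() is
 * an instance of the common "write trigger, poll status with bounded
 * sleep" pattern.  A generic, hypothetical form:
 */
static inline int ql_wait_reg_clear_sketch(struct ql_adapter *qdev,
                                           u32 reg, u32 bit, int seconds)
{
        while (seconds--) {
                if (!(ql_read32(qdev, reg) & bit))
                        return 0;       /* bit cleared, operation complete */
                ssleep(1);
        }
        return -ETIMEDOUT;
}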
3087
3088 static void ql_display_dev_info(struct net_device *ndev)
3089 {
3090         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3091
3092         QPRINTK(qdev, PROBE, INFO,
3093                 "Function #%d, NIC Roll %d, NIC Rev = %d, "
3094                 "XG Roll = %d, XG Rev = %d.\n",
3095                 qdev->func,
3096                 qdev->chip_rev_id & 0x0000000f,
3097                 qdev->chip_rev_id >> 4 & 0x0000000f,
3098                 qdev->chip_rev_id >> 8 & 0x0000000f,
3099                 qdev->chip_rev_id >> 12 & 0x0000000f);
3100         QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3101 }
3102
3103 static int ql_adapter_down(struct ql_adapter *qdev)
3104 {
3105         struct net_device *ndev = qdev->ndev;
3106         int i, status = 0;
3107         struct rx_ring *rx_ring;
3108
3109         netif_stop_queue(ndev);
3110         netif_carrier_off(ndev);
3111
3112         /* Don't kill the reset worker thread if we
3113          * are in the process of recovery.
3114          */
3115         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3116                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3117         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3118         cancel_delayed_work_sync(&qdev->mpi_work);
3119
3120         /* The default queue at index 0 is always processed in
3121          * a workqueue.
3122          */
3123         cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3124
3125         /* The rest of the rx_rings are processed in
3126          * a workqueue only if it's a single interrupt
3127          * environment (MSI/Legacy).
3128          */
3129         for (i = 1; i < qdev->rx_ring_count; i++) {
3130                 rx_ring = &qdev->rx_ring[i];
3131                 /* Only the RSS rings use NAPI on multi irq
3132                  * environment.  Outbound completion processing
3133                  * is done in interrupt context.
3134                  */
3135                 if (i >= qdev->rss_ring_first_cq_id) {
3136                         napi_disable(&rx_ring->napi);
3137                 } else {
3138                         cancel_delayed_work_sync(&rx_ring->rx_work);
3139                 }
3140         }
3141
3142         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3143
3144         ql_disable_interrupts(qdev);
3145
3146         ql_tx_ring_clean(qdev);
3147
3148         spin_lock(&qdev->hw_lock);
3149         status = ql_adapter_reset(qdev);
3150         if (status)
3151                 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3152                         qdev->func);
3153         spin_unlock(&qdev->hw_lock);
3154         return status;
3155 }
3156
3157 static int ql_adapter_up(struct ql_adapter *qdev)
3158 {
3159         int err = 0;
3160
3161         spin_lock(&qdev->hw_lock);
3162         err = ql_adapter_initialize(qdev);
3163         if (err) {
3164                 QPRINTK(qdev, IFUP, ERR, "Unable to initialize adapter.\n");
3165                 spin_unlock(&qdev->hw_lock);
3166                 goto err_init;
3167         }
3168         spin_unlock(&qdev->hw_lock);
3169         set_bit(QL_ADAPTER_UP, &qdev->flags);
3170         ql_enable_interrupts(qdev);
3171         ql_enable_all_completion_interrupts(qdev);
3172         if ((ql_read32(qdev, STS) & qdev->port_init)) {
3173                 netif_carrier_on(qdev->ndev);
3174                 netif_start_queue(qdev->ndev);
3175         }
3176
3177         return 0;
3178 err_init:
3179         ql_adapter_reset(qdev);
3180         return err;
3181 }
3182
3183 static int ql_cycle_adapter(struct ql_adapter *qdev)
3184 {
3185         int status;
3186
3187         status = ql_adapter_down(qdev);
3188         if (status)
3189                 goto error;
3190
3191         status = ql_adapter_up(qdev);
3192         if (status)
3193                 goto error;
3194
3195         return status;
3196 error:
3197         QPRINTK(qdev, IFUP, ALERT,
3198                 "Driver up/down cycle failed, closing device\n");
3199         rtnl_lock();
3200         dev_close(qdev->ndev);
3201         rtnl_unlock();
3202         return status;
3203 }
3204
3205 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3206 {
3207         ql_free_mem_resources(qdev);
3208         ql_free_irq(qdev);
3209 }
3210
3211 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3212 {
3213         int status = 0;
3214
3215         if (ql_alloc_mem_resources(qdev)) {
3216                 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3217                 return -ENOMEM;
3218         }
3219         status = ql_request_irq(qdev);
3220         if (status)
3221                 goto err_irq;
3222         return status;
3223 err_irq:
3224         ql_free_mem_resources(qdev);
3225         return status;
3226 }
3227
3228 static int qlge_close(struct net_device *ndev)
3229 {
3230         struct ql_adapter *qdev = netdev_priv(ndev);
3231
3232         /*
3233          * Wait for device to recover from a reset.
3234          * (Rarely happens, but possible.)
3235          */
3236         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3237                 msleep(1);
3238         ql_adapter_down(qdev);
3239         ql_release_adapter_resources(qdev);
3240         return 0;
3241 }
3242
3243 static int ql_configure_rings(struct ql_adapter *qdev)
3244 {
3245         int i;
3246         struct rx_ring *rx_ring;
3247         struct tx_ring *tx_ring;
3248         int cpu_cnt = num_online_cpus();
3249
3250         /*
3251          * For each processor present we allocate one
3252          * rx_ring for outbound completions, and one
3253          * rx_ring for inbound completions.  Plus there is
3254          * always the one default queue.  For the CPU
3255          * counts we end up with the following rx_rings:
3256          * rx_ring count =
3257          *  one default queue +
3258          *  (CPU count * outbound completion rx_ring) +
3259          *  (CPU count * inbound (RSS) completion rx_ring)
3260          * To keep it simple we limit the total number of
3261          * queues to < 32, so we truncate CPU to 8.
3262          * This limitation can be removed when requested.
3263          */
3264
3265         if (cpu_cnt > MAX_CPUS)
3266                 cpu_cnt = MAX_CPUS;
3267
3268         /*
3269          * rx_ring[0] is always the default queue.
3270          */
3271         /* Allocate outbound completion ring for each CPU. */
3272         qdev->tx_ring_count = cpu_cnt;
3273         /* Allocate inbound completion (RSS) ring for each CPU. */
3274         qdev->rss_ring_count = cpu_cnt;
3275         /* cq_id for the first inbound ring handler. */
3276         qdev->rss_ring_first_cq_id = cpu_cnt + 1;
3277         /*
3278          * qdev->rx_ring_count:
3279          * Total number of rx_rings.  This includes the one
3280          * default queue, a number of outbound completion
3281          * handler rx_rings, and the number of inbound
3282          * completion handler rx_rings.
3283          */
3284         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3285
3286         for (i = 0; i < qdev->tx_ring_count; i++) {
3287                 tx_ring = &qdev->tx_ring[i];
3288                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3289                 tx_ring->qdev = qdev;
3290                 tx_ring->wq_id = i;
3291                 tx_ring->wq_len = qdev->tx_ring_size;
3292                 tx_ring->wq_size =
3293                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3294
3295                 /*
3296                  * The completion queue ID for the tx rings start
3297                  * immediately after the default Q ID, which is zero.
3298                  */
3299                 tx_ring->cq_id = i + 1;
3300         }
3301
3302         for (i = 0; i < qdev->rx_ring_count; i++) {
3303                 rx_ring = &qdev->rx_ring[i];
3304                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3305                 rx_ring->qdev = qdev;
3306                 rx_ring->cq_id = i;
3307                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
3308                 if (i == 0) {   /* Default queue at index 0. */
3309                         /*
3310                          * Default queue handles bcast/mcast plus
3311                          * async events.  Needs buffers.
3312                          */
3313                         rx_ring->cq_len = qdev->rx_ring_size;
3314                         rx_ring->cq_size =
3315                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3316                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3317                         rx_ring->lbq_size =
3318                             rx_ring->lbq_len * sizeof(__le64);
3319                         rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3320                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3321                         rx_ring->sbq_size =
3322                             rx_ring->sbq_len * sizeof(__le64);
3323                         rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3324                         rx_ring->type = DEFAULT_Q;
3325                 } else if (i < qdev->rss_ring_first_cq_id) {
3326                         /*
3327                          * Outbound queue handles outbound completions only.
3328                          */
3329                         /* outbound cq is same size as tx_ring it services. */
3330                         rx_ring->cq_len = qdev->tx_ring_size;
3331                         rx_ring->cq_size =
3332                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3333                         rx_ring->lbq_len = 0;
3334                         rx_ring->lbq_size = 0;
3335                         rx_ring->lbq_buf_size = 0;
3336                         rx_ring->sbq_len = 0;
3337                         rx_ring->sbq_size = 0;
3338                         rx_ring->sbq_buf_size = 0;
3339                         rx_ring->type = TX_Q;
3340                 } else {        /* Inbound completions (RSS) queues */
3341                         /*
3342                          * Inbound queues handle unicast frames only.
3343                          */
3344                         rx_ring->cq_len = qdev->rx_ring_size;
3345                         rx_ring->cq_size =
3346                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3347                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3348                         rx_ring->lbq_size =
3349                             rx_ring->lbq_len * sizeof(__le64);
3350                         rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3351                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3352                         rx_ring->sbq_size =
3353                             rx_ring->sbq_len * sizeof(__le64);
3354                         rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3355                         rx_ring->type = RX_Q;
3356                 }
3357         }
3358         return 0;
3359 }
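
/* A worked example of the sizing above.  With eight online CPUs
 * (MAX_CPUS == 8):
 *
 *      tx_ring_count        = 8
 *      rss_ring_count       = 8
 *      rss_ring_first_cq_id = 9    (default queue 0 + 8 outbound CQs)
 *      rx_ring_count        = 8 + 8 + 1 = 17
 *
 * rx_ring[0] is the default queue, rx_ring[1..8] service the tx
 * rings' completions, and rx_ring[9..16] are the inbound RSS queues.
 */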
3360
3361 static int qlge_open(struct net_device *ndev)
3362 {
3363         int err = 0;
3364         struct ql_adapter *qdev = netdev_priv(ndev);
3365
3366         err = ql_configure_rings(qdev);
3367         if (err)
3368                 return err;
3369
3370         err = ql_get_adapter_resources(qdev);
3371         if (err)
3372                 goto error_up;
3373
3374         err = ql_adapter_up(qdev);
3375         if (err)
3376                 goto error_up;
3377
3378         return err;
3379
3380 error_up:
3381         ql_release_adapter_resources(qdev);
3382         return err;
3383 }
3384
3385 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3386 {
3387         struct ql_adapter *qdev = netdev_priv(ndev);
3388
3389         if (ndev->mtu == 1500 && new_mtu == 9000) {
3390                 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3391         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3392                 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3393         } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3394                    (ndev->mtu == 9000 && new_mtu == 9000)) {
3395                 return 0;
3396         } else
3397                 return -EINVAL;
3398         ndev->mtu = new_mtu;
3399         return 0;
3400 }
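
/* The MTU logic above recognizes only the two values 1500 (normal)
 * and 9000 (jumbo): transitions between them are logged and applied,
 * same-value "changes" are accepted silently, and anything else is
 * rejected with -EINVAL.
 */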
3401
3402 static struct net_device_stats *qlge_get_stats(struct net_device
3403                                                *ndev)
3404 {
3405         struct ql_adapter *qdev = netdev_priv(ndev);
3406         return &qdev->stats;
3407 }
3408
3409 static void qlge_set_multicast_list(struct net_device *ndev)
3410 {
3411         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3412         struct dev_mc_list *mc_ptr;
3413         int i;
3414
3415         spin_lock(&qdev->hw_lock);
3416         /*
3417          * Set or clear promiscuous mode if a
3418          * transition is taking place.
3419          */
3420         if (ndev->flags & IFF_PROMISC) {
3421                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3422                         if (ql_set_routing_reg
3423                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3424                                 QPRINTK(qdev, HW, ERR,
3425                                         "Failed to set promiscuous mode.\n");
3426                         } else {
3427                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
3428                         }
3429                 }
3430         } else {
3431                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3432                         if (ql_set_routing_reg
3433                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3434                                 QPRINTK(qdev, HW, ERR,
3435                                         "Failed to clear promiscuous mode.\n");
3436                         } else {
3437                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3438                         }
3439                 }
3440         }
3441
3442         /*
3443          * Set or clear all multicast mode if a
3444          * transition is taking place.
3445          */
3446         if ((ndev->flags & IFF_ALLMULTI) ||
3447             (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3448                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3449                         if (ql_set_routing_reg
3450                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3451                                 QPRINTK(qdev, HW, ERR,
3452                                         "Failed to set all-multi mode.\n");
3453                         } else {
3454                                 set_bit(QL_ALLMULTI, &qdev->flags);
3455                         }
3456                 }
3457         } else {
3458                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3459                         if (ql_set_routing_reg
3460                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3461                                 QPRINTK(qdev, HW, ERR,
3462                                         "Failed to clear all-multi mode.\n");
3463                         } else {
3464                                 clear_bit(QL_ALLMULTI, &qdev->flags);
3465                         }
3466                 }
3467         }
3468
3469         if (ndev->mc_count) {
3470                 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3471                      i++, mc_ptr = mc_ptr->next)
3472                         if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3473                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3474                                 QPRINTK(qdev, HW, ERR,
3475                                         "Failed to load multicast address.\n");
3476                                 goto exit;
3477                         }
3478                 if (ql_set_routing_reg
3479                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3480                         QPRINTK(qdev, HW, ERR,
3481                                 "Failed to set multicast match mode.\n");
3482                 } else {
3483                         set_bit(QL_ALLMULTI, &qdev->flags);
3484                 }
3485         }
3486 exit:
3487         spin_unlock(&qdev->hw_lock);
3488 }
3489
3490 static int qlge_set_mac_address(struct net_device *ndev, void *p)
3491 {
3492         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3493         struct sockaddr *addr = p;
3494         int ret = 0;
3495
3496         if (netif_running(ndev))
3497                 return -EBUSY;
3498
3499         if (!is_valid_ether_addr(addr->sa_data))
3500                 return -EADDRNOTAVAIL;
3501         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3502
3503         spin_lock(&qdev->hw_lock);
3504         if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3505                         MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
3506                 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3507                 ret = -1;
3508         }
3509         spin_unlock(&qdev->hw_lock);
3510
3511         return ret;
3512 }
3513
3514 static void qlge_tx_timeout(struct net_device *ndev)
3515 {
3516         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3517         ql_queue_asic_error(qdev);
3518 }
3519
3520 static void ql_asic_reset_work(struct work_struct *work)
3521 {
3522         struct ql_adapter *qdev =
3523             container_of(work, struct ql_adapter, asic_reset_work.work);
3524         ql_cycle_adapter(qdev);
3525 }
3526
3527 static void ql_get_board_info(struct ql_adapter *qdev)
3528 {
3529         qdev->func =
3530             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3531         if (qdev->func) {
3532                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3533                 qdev->port_link_up = STS_PL1;
3534                 qdev->port_init = STS_PI1;
3535                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3536                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3537         } else {
3538                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3539                 qdev->port_link_up = STS_PL0;
3540                 qdev->port_init = STS_PI0;
3541                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3542                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3543         }
3544         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3545 }
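
/* ql_get_board_info() selects per-function resources from the STS
 * function ID:
 *
 *      func 0 -> XGMAC0 semaphore, STS_PL0/STS_PI0, FUNC0 mailboxes
 *      func 1 -> XGMAC1 semaphore, STS_PL1/STS_PI1, FUNC2 mailboxes
 *
 * (the function-1 mailbox constants really are named FUNC2 in the
 * register map, as used above).
 */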
3546
3547 static void ql_release_all(struct pci_dev *pdev)
3548 {
3549         struct net_device *ndev = pci_get_drvdata(pdev);
3550         struct ql_adapter *qdev = netdev_priv(ndev);
3551
3552         if (qdev->workqueue) {
3553                 destroy_workqueue(qdev->workqueue);
3554                 qdev->workqueue = NULL;
3555         }
3556         if (qdev->q_workqueue) {
3557                 destroy_workqueue(qdev->q_workqueue);
3558                 qdev->q_workqueue = NULL;
3559         }
3560         if (qdev->reg_base)
3561                 iounmap(qdev->reg_base);
3562         if (qdev->doorbell_area)
3563                 iounmap(qdev->doorbell_area);
3564         pci_release_regions(pdev);
3565         pci_set_drvdata(pdev, NULL);
3566 }
3567
3568 static int __devinit ql_init_device(struct pci_dev *pdev,
3569                                     struct net_device *ndev, int cards_found)
3570 {
3571         struct ql_adapter *qdev = netdev_priv(ndev);
3572         int pos, err = 0;
3573         u16 val16;
3574
3575         memset((void *)qdev, 0, sizeof(*qdev));
3576         err = pci_enable_device(pdev);
3577         if (err) {
3578                 dev_err(&pdev->dev, "PCI device enable failed.\n");
3579                 return err;
3580         }
3581
3582         pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3583         if (pos <= 0) {
3584                 dev_err(&pdev->dev, "Cannot find PCI Express capability, "
3585                         "aborting.\n");
3586                 err = -ENXIO;
                goto err_out;
3587         } else {
3588                 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3589                 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3590                 val16 |= (PCI_EXP_DEVCTL_CERE |
3591                           PCI_EXP_DEVCTL_NFERE |
3592                           PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3593                 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3594         }
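
        /* The DEVCTL programming above enables reporting of
         * correctable, non-fatal, fatal, and unsupported-request
         * errors (CERE/NFERE/FERE/URRE) and clears No Snoop so
         * upstream DMA transactions are snooped by the CPU caches.
         */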
3595
3596         err = pci_request_regions(pdev, DRV_NAME);
3597         if (err) {
3598                 dev_err(&pdev->dev, "PCI region request failed.\n");
3599                 goto err_out;
3600         }
3601
3602         pci_set_master(pdev);
3603         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3604                 set_bit(QL_DMA64, &qdev->flags);
3605                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3606         } else {
3607                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3608                 if (!err)
3609                        err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3610         }
3611
3612         if (err) {
3613                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3614                 goto err_out;
3615         }
3616
3617         pci_set_drvdata(pdev, ndev);
3618         qdev->reg_base =
3619             ioremap_nocache(pci_resource_start(pdev, 1),
3620                             pci_resource_len(pdev, 1));
3621         if (!qdev->reg_base) {
3622                 dev_err(&pdev->dev, "Register mapping failed.\n");
3623                 err = -ENOMEM;
3624                 goto err_out;
3625         }
3626
3627         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3628         qdev->doorbell_area =
3629             ioremap_nocache(pci_resource_start(pdev, 3),
3630                             pci_resource_len(pdev, 3));
3631         if (!qdev->doorbell_area) {
3632                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3633                 err = -ENOMEM;
3634                 goto err_out;
3635         }
3636
3637         ql_get_board_info(qdev);
3638         qdev->ndev = ndev;
3639         qdev->pdev = pdev;
3640         qdev->msg_enable = netif_msg_init(debug, default_msg);
3641         spin_lock_init(&qdev->hw_lock);
3642         spin_lock_init(&qdev->stats_lock);
3643
3644         /* make sure the EEPROM is good */
3645         err = ql_get_flash_params(qdev);
3646         if (err) {
3647                 dev_err(&pdev->dev, "Invalid FLASH.\n");
3648                 goto err_out;
3649         }
3650
3651         if (!is_valid_ether_addr(qdev->flash.mac_addr)) {
3652                 err = -EIO;
                goto err_out;
        }
3653
3654         memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
3655         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3656
3657         /* Set up the default ring sizes. */
3658         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3659         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3660
3661         /* Set up the coalescing parameters. */
3662         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3663         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3664         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3665         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3666
3667         /*
3668          * Set up the operating parameters.
3669          */
3670         qdev->rx_csum = 1;
3671
3672         qdev->q_workqueue = create_workqueue(ndev->name);
3673         qdev->workqueue = create_singlethread_workqueue(ndev->name);
3674         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3675         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3676         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3677
3678         if (!cards_found) {
3679                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3680                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3681                          DRV_NAME, DRV_VERSION);
3682         }
3683         return 0;
3684 err_out:
3685         ql_release_all(pdev);
3686         pci_disable_device(pdev);
3687         return err;
3688 }
3689
3690
3691 static const struct net_device_ops qlge_netdev_ops = {
3692         .ndo_open               = qlge_open,
3693         .ndo_stop               = qlge_close,
3694         .ndo_start_xmit         = qlge_send,
3695         .ndo_change_mtu         = qlge_change_mtu,
3696         .ndo_get_stats          = qlge_get_stats,
3697         .ndo_set_multicast_list = qlge_set_multicast_list,
3698         .ndo_set_mac_address    = qlge_set_mac_address,
3699         .ndo_validate_addr      = eth_validate_addr,
3700         .ndo_tx_timeout         = qlge_tx_timeout,
3701         .ndo_vlan_rx_register   = ql_vlan_rx_register,
3702         .ndo_vlan_rx_add_vid    = ql_vlan_rx_add_vid,
3703         .ndo_vlan_rx_kill_vid   = ql_vlan_rx_kill_vid,
3704 };
3705
3706 static int __devinit qlge_probe(struct pci_dev *pdev,
3707                                 const struct pci_device_id *pci_entry)
3708 {
3709         struct net_device *ndev = NULL;
3710         struct ql_adapter *qdev = NULL;
3711         static int cards_found = 0;
3712         int err = 0;
3713
3714         ndev = alloc_etherdev(sizeof(struct ql_adapter));
3715         if (!ndev)
3716                 return -ENOMEM;
3717
3718         err = ql_init_device(pdev, ndev, cards_found);
3719         if (err < 0) {
3720                 free_netdev(ndev);
3721                 return err;
3722         }
3723
3724         qdev = netdev_priv(ndev);
3725         SET_NETDEV_DEV(ndev, &pdev->dev);
3726         ndev->features = (0
3727                           | NETIF_F_IP_CSUM
3728                           | NETIF_F_SG
3729                           | NETIF_F_TSO
3730                           | NETIF_F_TSO6
3731                           | NETIF_F_TSO_ECN
3732                           | NETIF_F_HW_VLAN_TX
3733                           | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
3734
3735         if (test_bit(QL_DMA64, &qdev->flags))
3736                 ndev->features |= NETIF_F_HIGHDMA;
3737
3738         /*
3739          * Set up net_device structure.
3740          */
3741         ndev->tx_queue_len = qdev->tx_ring_size;
3742         ndev->irq = pdev->irq;
3743
3744         ndev->netdev_ops = &qlge_netdev_ops;
3745         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
3746         ndev->watchdog_timeo = 10 * HZ;
3747
3748         err = register_netdev(ndev);
3749         if (err) {
3750                 dev_err(&pdev->dev, "net device registration failed.\n");
3751                 ql_release_all(pdev);
3752                 pci_disable_device(pdev);
3753                 return err;
3754         }
3755         netif_carrier_off(ndev);
3756         netif_stop_queue(ndev);
3757         ql_display_dev_info(ndev);
3758         cards_found++;
3759         return 0;
3760 }
3761
3762 static void __devexit qlge_remove(struct pci_dev *pdev)
3763 {
3764         struct net_device *ndev = pci_get_drvdata(pdev);
3765         unregister_netdev(ndev);
3766         ql_release_all(pdev);
3767         pci_disable_device(pdev);
3768         free_netdev(ndev);
3769 }
3770
3771 /*
3772  * This callback is called by the PCI subsystem whenever
3773  * a PCI bus error is detected.
3774  */
3775 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
3776                                                enum pci_channel_state state)
3777 {
3778         struct net_device *ndev = pci_get_drvdata(pdev);
3779         struct ql_adapter *qdev = netdev_priv(ndev);
3780
3781         if (netif_running(ndev))
3782                 ql_adapter_down(qdev);
3783
3784         pci_disable_device(pdev);
3785
3786         /* Request a slot reset. */
3787         return PCI_ERS_RESULT_NEED_RESET;
3788 }
3789
3790 /*
3791  * This callback is called after the PCI bus has been reset.
3792  * Basically, this tries to restart the card from scratch.
3793  * This is a shortened version of the device probe/discovery code;
3794  * it resembles the first half of the qlge_probe() routine.
3795  */
3796 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
3797 {
3798         struct net_device *ndev = pci_get_drvdata(pdev);
3799         struct ql_adapter *qdev = netdev_priv(ndev);
3800
3801         if (pci_enable_device(pdev)) {
3802                 QPRINTK(qdev, IFUP, ERR,
3803                         "Cannot re-enable PCI device after reset.\n");
3804                 return PCI_ERS_RESULT_DISCONNECT;
3805         }
3806
3807         pci_set_master(pdev);
3808
3809         netif_carrier_off(ndev);
3810         netif_stop_queue(ndev);
3811         ql_adapter_reset(qdev);
3812
3813         /* Make sure the EEPROM is good */
3814         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3815
3816         if (!is_valid_ether_addr(ndev->perm_addr)) {
3817                 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
3818                 return PCI_ERS_RESULT_DISCONNECT;
3819         }
3820
3821         return PCI_ERS_RESULT_RECOVERED;
3822 }
3823
3824 static void qlge_io_resume(struct pci_dev *pdev)
3825 {
3826         struct net_device *ndev = pci_get_drvdata(pdev);
3827         struct ql_adapter *qdev = netdev_priv(ndev);
3828
3829         pci_set_master(pdev);
3830
3831         if (netif_running(ndev)) {
3832                 if (ql_adapter_up(qdev)) {
3833                         QPRINTK(qdev, IFUP, ERR,
3834                                 "Device initialization failed after reset.\n");
3835                         return;
3836                 }
3837         }
3838
3839         netif_device_attach(ndev);
3840 }
3841
3842 static struct pci_error_handlers qlge_err_handler = {
3843         .error_detected = qlge_io_error_detected,
3844         .slot_reset = qlge_io_slot_reset,
3845         .resume = qlge_io_resume,
3846 };
3847
3848 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
3849 {
3850         struct net_device *ndev = pci_get_drvdata(pdev);
3851         struct ql_adapter *qdev = netdev_priv(ndev);
3852         int err, i;
3853
3854         netif_device_detach(ndev);
3855
3856         if (netif_running(ndev)) {
3857                 err = ql_adapter_down(qdev);
3858                 if (err)
3859                         return err;
3860         }
3861
3862         for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
3863                 netif_napi_del(&qdev->rx_ring[i].napi);
3864
3865         err = pci_save_state(pdev);
3866         if (err)
3867                 return err;
3868
3869         pci_disable_device(pdev);
3870
3871         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3872
3873         return 0;
3874 }
3875
3876 #ifdef CONFIG_PM
3877 static int qlge_resume(struct pci_dev *pdev)
3878 {
3879         struct net_device *ndev = pci_get_drvdata(pdev);
3880         struct ql_adapter *qdev = netdev_priv(ndev);
3881         int err;
3882
3883         pci_set_power_state(pdev, PCI_D0);
3884         pci_restore_state(pdev);
3885         err = pci_enable_device(pdev);
3886         if (err) {
3887                 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
3888                 return err;
3889         }
3890         pci_set_master(pdev);
3891
3892         pci_enable_wake(pdev, PCI_D3hot, 0);
3893         pci_enable_wake(pdev, PCI_D3cold, 0);
3894
3895         if (netif_running(ndev)) {
3896                 err = ql_adapter_up(qdev);
3897                 if (err)
3898                         return err;
3899         }
3900
3901         netif_device_attach(ndev);
3902
3903         return 0;
3904 }
3905 #endif /* CONFIG_PM */
3906
3907 static void qlge_shutdown(struct pci_dev *pdev)
3908 {
3909         qlge_suspend(pdev, PMSG_SUSPEND);
3910 }
3911
3912 static struct pci_driver qlge_driver = {
3913         .name = DRV_NAME,
3914         .id_table = qlge_pci_tbl,
3915         .probe = qlge_probe,
3916         .remove = __devexit_p(qlge_remove),
3917 #ifdef CONFIG_PM
3918         .suspend = qlge_suspend,
3919         .resume = qlge_resume,
3920 #endif
3921         .shutdown = qlge_shutdown,
3922         .err_handler = &qlge_err_handler
3923 };
3924
3925 static int __init qlge_init_module(void)
3926 {
3927         return pci_register_driver(&qlge_driver);
3928 }
3929
3930 static void __exit qlge_exit(void)
3931 {
3932         pci_unregister_driver(&qlge_driver);
3933 }
3934
3935 module_init(qlge_init_module);
3936 module_exit(qlge_exit);