cxgb3: use request_firmware() for the EDC registers setup
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
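
/*
 * For example, CH_DEVICE(0x20, 0) expands to
 * { PCI_VENDOR_ID_CHELSIO, 0x20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 * where the last field is an index into the driver's adapter-info table.
 */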

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
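
/* e.g. loading the module with "modprobe cxgb3 msi=1" keeps the driver off
 * MSI-X, leaving it to fall back to MSI or legacy pin interrupts */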

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device whose port settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
				    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else
		netif_carrier_off(dev);

	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the modified PHY
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
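 * Vector 0 carries slow-path/async events and keeps the adapter's name;
 * vectors 1..N are named after the net device and queue set they serve.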
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

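/*
 * Poll the first response queue's offload packet counter until @n replies
 * beyond @init_cnt have arrived, giving up after five 10 ms sleeps.
 */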
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

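/*
 * Initialize the parity of the TP memories by writing out every SMT, L2T
 * and routing table entry.  When an skb allocation fails we fall back to
 * the adapter's pre-allocated nofail_skb; in that case we wait for the
 * outstanding management replies before reallocating the spare.
 */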
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}
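	/*
	 * Worked example: with nq0 = 2 and nq1 = 2 the first half of the
	 * table cycles 0,1,0,1,... and the second half cycles 2,3,2,3,...,
	 * so hashed traffic for each port is spread evenly over that port's
	 * own queue sets.
	 */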

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;
	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	The device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
		     ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
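
	/*
	 * bpt is bytes per tick, cpt is core clocks per tick; cpt == 0 means
	 * the scheduler is disabled.  In the enabled case below, v becomes
	 * ticks per second (taking vpd.cclk to be in kHz), so v * bpt is
	 * bytes per second, and dividing by 125 (i.e. multiplying by 8/1000)
	 * yields Kbps.
	 */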
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
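
/*
 * FW_FNAME and TPSRAM_NAME are templates: a (hypothetical) 7.12.0 firmware
 * on a rev-B part would be requested as "cxgb3/t3fw-7.12.0.bin" and
 * "cxgb3/t3b_psram-7.12.0.bin" respectively.
 */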

static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	char buf[64];
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret;

	snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));

	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not load EDC firmware: unable to load %s\n",
			buf);
		return ret;
	}

	/* check size, taking the trailing checksum word into account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
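	/* the image carries a trailing 32-bit word chosen so that the
	 * big-endian sum over the whole file is 0xffffffff */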
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

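	/* each 32-bit big-endian word holds two 16-bit EDC register values;
	 * the high halfword is stored first in the PHY cache */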
	for (i = 0; i < size / 4; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
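	/* a zero return means no external protocol SRAM image exists for
	 * this chip revision, so there is nothing to update */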
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: the adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
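
	/* linkpoll_period is in tenths of a second and stats_update_period
	 * in seconds, hence the different HZ scalings above */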
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_scheduled_work();

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	dev->real_num_tx_queues = pi->nqsets;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

1368         if (!adapter->open_device_map)
1369                 return 0;
1370
1371         /* Stop link fault interrupts */
1372         t3_xgm_intr_disable(adapter, pi->port_id);
1373         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1374
1375         t3_port_intr_disable(adapter, pi->port_id);
1376         netif_tx_stop_all_queues(dev);
1377         pi->phy.ops->power_down(&pi->phy, 1);
1378         netif_carrier_off(dev);
1379         t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1380
1381         spin_lock_irq(&adapter->work_lock);     /* sync with update task */
1382         clear_bit(pi->port_id, &adapter->open_device_map);
1383         spin_unlock_irq(&adapter->work_lock);
1384
1385         if (!(adapter->open_device_map & PORT_MASK))
1386                 cancel_delayed_work_sync(&adapter->adap_check_task);
1387
1388         if (!adapter->open_device_map)
1389                 cxgb_down(adapter);
1390
1391         return 0;
1392 }
1393
1394 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1395 {
1396         struct port_info *pi = netdev_priv(dev);
1397         struct adapter *adapter = pi->adapter;
1398         struct net_device_stats *ns = &pi->netstats;
1399         const struct mac_stats *pstats;
1400
1401         spin_lock(&adapter->stats_lock);
1402         pstats = t3_mac_update_stats(&pi->mac);
1403         spin_unlock(&adapter->stats_lock);
1404
1405         ns->tx_bytes = pstats->tx_octets;
1406         ns->tx_packets = pstats->tx_frames;
1407         ns->rx_bytes = pstats->rx_octets;
1408         ns->rx_packets = pstats->rx_frames;
1409         ns->multicast = pstats->rx_mcast_frames;
1410
1411         ns->tx_errors = pstats->tx_underrun;
1412         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1413             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1414             pstats->rx_fifo_ovfl;
1415
1416         /* detailed rx_errors */
1417         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1418         ns->rx_over_errors = 0;
1419         ns->rx_crc_errors = pstats->rx_fcs_errs;
1420         ns->rx_frame_errors = pstats->rx_symbol_errs;
1421         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1422         ns->rx_missed_errors = pstats->rx_cong_drops;
1423
1424         /* detailed tx_errors */
1425         ns->tx_aborted_errors = 0;
1426         ns->tx_carrier_errors = 0;
1427         ns->tx_fifo_errors = pstats->tx_underrun;
1428         ns->tx_heartbeat_errors = 0;
1429         ns->tx_window_errors = 0;
1430         return ns;
1431 }
1432
1433 static u32 get_msglevel(struct net_device *dev)
1434 {
1435         struct port_info *pi = netdev_priv(dev);
1436         struct adapter *adapter = pi->adapter;
1437
1438         return adapter->msg_enable;
1439 }
1440
1441 static void set_msglevel(struct net_device *dev, u32 val)
1442 {
1443         struct port_info *pi = netdev_priv(dev);
1444         struct adapter *adapter = pi->adapter;
1445
1446         adapter->msg_enable = val;
1447 }
1448
1449 static char stats_strings[][ETH_GSTRING_LEN] = {
1450         "TxOctetsOK         ",
1451         "TxFramesOK         ",
1452         "TxMulticastFramesOK",
1453         "TxBroadcastFramesOK",
1454         "TxPauseFrames      ",
1455         "TxUnderrun         ",
1456         "TxExtUnderrun      ",
1457
1458         "TxFrames64         ",
1459         "TxFrames65To127    ",
1460         "TxFrames128To255   ",
1461         "TxFrames256To511   ",
1462         "TxFrames512To1023  ",
1463         "TxFrames1024To1518 ",
1464         "TxFrames1519ToMax  ",
1465
1466         "RxOctetsOK         ",
1467         "RxFramesOK         ",
1468         "RxMulticastFramesOK",
1469         "RxBroadcastFramesOK",
1470         "RxPauseFrames      ",
1471         "RxFCSErrors        ",
1472         "RxSymbolErrors     ",
1473         "RxShortErrors      ",
1474         "RxJabberErrors     ",
1475         "RxLengthErrors     ",
1476         "RxFIFOoverflow     ",
1477
1478         "RxFrames64         ",
1479         "RxFrames65To127    ",
1480         "RxFrames128To255   ",
1481         "RxFrames256To511   ",
1482         "RxFrames512To1023  ",
1483         "RxFrames1024To1518 ",
1484         "RxFrames1519ToMax  ",
1485
1486         "PhyFIFOErrors      ",
1487         "TSO                ",
1488         "VLANextractions    ",
1489         "VLANinsertions     ",
1490         "TxCsumOffload      ",
1491         "RxCsumGood         ",
1492         "LroAggregated      ",
1493         "LroFlushed         ",
1494         "LroNoDesc          ",
1495         "RxDrops            ",
1496
1497         "CheckTXEnToggled   ",
1498         "CheckResets        ",
1499
1500         "LinkFaults         ",
1501 };
1502
1503 static int get_sset_count(struct net_device *dev, int sset)
1504 {
1505         switch (sset) {
1506         case ETH_SS_STATS:
1507                 return ARRAY_SIZE(stats_strings);
1508         default:
1509                 return -EOPNOTSUPP;
1510         }
1511 }
1512
1513 #define T3_REGMAP_SIZE (3 * 1024)
1514
1515 static int get_regs_len(struct net_device *dev)
1516 {
1517         return T3_REGMAP_SIZE;
1518 }
1519
1520 static int get_eeprom_len(struct net_device *dev)
1521 {
1522         return EEPROMSIZE;
1523 }
1524
1525 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1526 {
1527         struct port_info *pi = netdev_priv(dev);
1528         struct adapter *adapter = pi->adapter;
1529         u32 fw_vers = 0;
1530         u32 tp_vers = 0;
1531
1532         spin_lock(&adapter->stats_lock);
1533         t3_get_fw_version(adapter, &fw_vers);
1534         t3_get_tp_version(adapter, &tp_vers);
1535         spin_unlock(&adapter->stats_lock);
1536
1537         strcpy(info->driver, DRV_NAME);
1538         strcpy(info->version, DRV_VERSION);
1539         strcpy(info->bus_info, pci_name(adapter->pdev));
1540         if (!fw_vers)
1541                 strcpy(info->fw_version, "N/A");
1542         else {
1543                 snprintf(info->fw_version, sizeof(info->fw_version),
1544                          "%s %u.%u.%u TP %u.%u.%u",
1545                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1546                          G_FW_VERSION_MAJOR(fw_vers),
1547                          G_FW_VERSION_MINOR(fw_vers),
1548                          G_FW_VERSION_MICRO(fw_vers),
1549                          G_TP_VERSION_MAJOR(tp_vers),
1550                          G_TP_VERSION_MINOR(tp_vers),
1551                          G_TP_VERSION_MICRO(tp_vers));
1552         }
1553 }
1554
1555 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1556 {
1557         if (stringset == ETH_SS_STATS)
1558                 memcpy(data, stats_strings, sizeof(stats_strings));
1559 }
1560
1561 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1562                                             struct port_info *p, int idx)
1563 {
1564         int i;
1565         unsigned long tot = 0;
1566
1567         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1568                 tot += adapter->sge.qs[i].port_stats[idx];
1569         return tot;
1570 }
1571
1572 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1573                       u64 *data)
1574 {
1575         struct port_info *pi = netdev_priv(dev);
1576         struct adapter *adapter = pi->adapter;
1577         const struct mac_stats *s;
1578
1579         spin_lock(&adapter->stats_lock);
1580         s = t3_mac_update_stats(&pi->mac);
1581         spin_unlock(&adapter->stats_lock);
1582
1583         *data++ = s->tx_octets;
1584         *data++ = s->tx_frames;
1585         *data++ = s->tx_mcast_frames;
1586         *data++ = s->tx_bcast_frames;
1587         *data++ = s->tx_pause;
1588         *data++ = s->tx_underrun;
1589         *data++ = s->tx_fifo_urun;
1590
1591         *data++ = s->tx_frames_64;
1592         *data++ = s->tx_frames_65_127;
1593         *data++ = s->tx_frames_128_255;
1594         *data++ = s->tx_frames_256_511;
1595         *data++ = s->tx_frames_512_1023;
1596         *data++ = s->tx_frames_1024_1518;
1597         *data++ = s->tx_frames_1519_max;
1598
1599         *data++ = s->rx_octets;
1600         *data++ = s->rx_frames;
1601         *data++ = s->rx_mcast_frames;
1602         *data++ = s->rx_bcast_frames;
1603         *data++ = s->rx_pause;
1604         *data++ = s->rx_fcs_errs;
1605         *data++ = s->rx_symbol_errs;
1606         *data++ = s->rx_short;
1607         *data++ = s->rx_jabber;
1608         *data++ = s->rx_too_long;
1609         *data++ = s->rx_fifo_ovfl;
1610
1611         *data++ = s->rx_frames_64;
1612         *data++ = s->rx_frames_65_127;
1613         *data++ = s->rx_frames_128_255;
1614         *data++ = s->rx_frames_256_511;
1615         *data++ = s->rx_frames_512_1023;
1616         *data++ = s->rx_frames_1024_1518;
1617         *data++ = s->rx_frames_1519_max;
1618
1619         *data++ = pi->phy.fifo_errors;
1620
1621         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1622         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1623         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1624         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1625         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
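             /* The next three slots line up with the Lro* entries in
              * stats_strings; they are reported as zero here. */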
1626         *data++ = 0;
1627         *data++ = 0;
1628         *data++ = 0;
1629         *data++ = s->rx_cong_drops;
1630
1631         *data++ = s->num_toggled;
1632         *data++ = s->num_resets;
1633
1634         *data++ = s->link_faults;
1635 }
1636
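     /*
      * Snapshot the register block [start, end] (byte addresses, inclusive)
      * into the dump buffer at the same byte offset.
      */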
1637 static inline void reg_block_dump(struct adapter *ap, void *buf,
1638                                   unsigned int start, unsigned int end)
1639 {
1640         u32 *p = buf + start;
1641
1642         for (; start <= end; start += sizeof(u32))
1643                 *p++ = t3_read_reg(ap, start);
1644 }
1645
1646 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1647                      void *buf)
1648 {
1649         struct port_info *pi = netdev_priv(dev);
1650         struct adapter *ap = pi->adapter;
1651
1652         /*
1653          * Version scheme:
1654          * bits 0..9: chip version
1655          * bits 10..15: chip revision
1656          * bit 31: set for PCIe cards
1657          */
1658         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1659
1660         /*
1661          * We skip the MAC statistics registers because they are clear-on-read.
1662          * Also reading multi-register stats would need to synchronize with the
1663          * periodic mac stats accumulation.  Hard to justify the complexity.
1664          */
1665         memset(buf, 0, T3_REGMAP_SIZE);
1666         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1667         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1668         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1669         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1670         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1671         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1672                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1673         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1674                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1675 }
1676
1677 static int restart_autoneg(struct net_device *dev)
1678 {
1679         struct port_info *p = netdev_priv(dev);
1680
1681         if (!netif_running(dev))
1682                 return -EAGAIN;
1683         if (p->link_config.autoneg != AUTONEG_ENABLE)
1684                 return -EINVAL;
1685         p->phy.ops->autoneg_restart(&p->phy);
1686         return 0;
1687 }
1688
1689 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1690 {
1691         struct port_info *pi = netdev_priv(dev);
1692         struct adapter *adapter = pi->adapter;
1693         int i;
1694
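             /* Blink the port LED (GPIO0) once per second for data seconds;
              * 0 selects a default of 2 seconds. */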
1695         if (data == 0)
1696                 data = 2;
1697
1698         for (i = 0; i < data * 2; i++) {
1699                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1700                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1701                 if (msleep_interruptible(500))
1702                         break;
1703         }
1704         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1705                          F_GPIO0_OUT_VAL);
1706         return 0;
1707 }
1708
1709 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1710 {
1711         struct port_info *p = netdev_priv(dev);
1712
1713         cmd->supported = p->link_config.supported;
1714         cmd->advertising = p->link_config.advertising;
1715
1716         if (netif_carrier_ok(dev)) {
1717                 cmd->speed = p->link_config.speed;
1718                 cmd->duplex = p->link_config.duplex;
1719         } else {
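                     /* link down: speed and duplex are unknown (-1) */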
1720                 cmd->speed = -1;
1721                 cmd->duplex = -1;
1722         }
1723
1724         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1725         cmd->phy_address = p->phy.mdio.prtad;
1726         cmd->transceiver = XCVR_EXTERNAL;
1727         cmd->autoneg = p->link_config.autoneg;
1728         cmd->maxtxpkt = 0;
1729         cmd->maxrxpkt = 0;
1730         return 0;
1731 }
1732
1733 static int speed_duplex_to_caps(int speed, int duplex)
1734 {
1735         int cap = 0;
1736
1737         switch (speed) {
1738         case SPEED_10:
1739                 if (duplex == DUPLEX_FULL)
1740                         cap = SUPPORTED_10baseT_Full;
1741                 else
1742                         cap = SUPPORTED_10baseT_Half;
1743                 break;
1744         case SPEED_100:
1745                 if (duplex == DUPLEX_FULL)
1746                         cap = SUPPORTED_100baseT_Full;
1747                 else
1748                         cap = SUPPORTED_100baseT_Half;
1749                 break;
1750         case SPEED_1000:
1751                 if (duplex == DUPLEX_FULL)
1752                         cap = SUPPORTED_1000baseT_Full;
1753                 else
1754                         cap = SUPPORTED_1000baseT_Half;
1755                 break;
1756         case SPEED_10000:
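                     /* 10GBASE-T is full duplex only; half duplex maps to
                      * no capability */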
1757                 if (duplex == DUPLEX_FULL)
1758                         cap = SUPPORTED_10000baseT_Full;
1759         }
1760         return cap;
1761 }
1762
1763 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1764                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1765                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1766                       ADVERTISED_10000baseT_Full)
1767
1768 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1769 {
1770         struct port_info *p = netdev_priv(dev);
1771         struct link_config *lc = &p->link_config;
1772
1773         if (!(lc->supported & SUPPORTED_Autoneg)) {
1774                 /*
1775                  * PHY offers a single speed/duplex.  See if that's what's
1776                  * being requested.
1777                  */
1778                 if (cmd->autoneg == AUTONEG_DISABLE) {
1779                         int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1780                         if (lc->supported & cap)
1781                                 return 0;
1782                 }
1783                 return -EINVAL;
1784         }
1785
1786         if (cmd->autoneg == AUTONEG_DISABLE) {
1787                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1788
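                     /*
                      * 1000BASE-T requires autonegotiation (IEEE 802.3
                      * clause 40), so a forced 1Gb/s setting is rejected.
                      */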
1789                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1790                         return -EINVAL;
1791                 lc->requested_speed = cmd->speed;
1792                 lc->requested_duplex = cmd->duplex;
1793                 lc->advertising = 0;
1794         } else {
1795                 cmd->advertising &= ADVERTISED_MASK;
1796                 cmd->advertising &= lc->supported;
1797                 if (!cmd->advertising)
1798                         return -EINVAL;
1799                 lc->requested_speed = SPEED_INVALID;
1800                 lc->requested_duplex = DUPLEX_INVALID;
1801                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1802         }
1803         lc->autoneg = cmd->autoneg;
1804         if (netif_running(dev))
1805                 t3_link_start(&p->phy, &p->mac, lc);
1806         return 0;
1807 }
1808
1809 static void get_pauseparam(struct net_device *dev,
1810                            struct ethtool_pauseparam *epause)
1811 {
1812         struct port_info *p = netdev_priv(dev);
1813
1814         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1815         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1816         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1817 }
1818
1819 static int set_pauseparam(struct net_device *dev,
1820                           struct ethtool_pauseparam *epause)
1821 {
1822         struct port_info *p = netdev_priv(dev);
1823         struct link_config *lc = &p->link_config;
1824
1825         if (epause->autoneg == AUTONEG_DISABLE)
1826                 lc->requested_fc = 0;
1827         else if (lc->supported & SUPPORTED_Autoneg)
1828                 lc->requested_fc = PAUSE_AUTONEG;
1829         else
1830                 return -EINVAL;
1831
1832         if (epause->rx_pause)
1833                 lc->requested_fc |= PAUSE_RX;
1834         if (epause->tx_pause)
1835                 lc->requested_fc |= PAUSE_TX;
1836         if (lc->autoneg == AUTONEG_ENABLE) {
1837                 if (netif_running(dev))
1838                         t3_link_start(&p->phy, &p->mac, lc);
1839         } else {
1840                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1841                 if (netif_running(dev))
1842                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1843         }
1844         return 0;
1845 }
1846
1847 static u32 get_rx_csum(struct net_device *dev)
1848 {
1849         struct port_info *p = netdev_priv(dev);
1850
1851         return p->rx_offload & T3_RX_CSUM;
1852 }
1853
1854 static int set_rx_csum(struct net_device *dev, u32 data)
1855 {
1856         struct port_info *p = netdev_priv(dev);
1857
1858         if (data) {
1859                 p->rx_offload |= T3_RX_CSUM;
1860         } else {
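                     /*
                      * LRO depends on valid Rx checksums, so disabling Rx
                      * checksum offload also turns LRO off on every one of
                      * the port's queue sets.
                      */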
1861                 int i;
1862
1863                 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1864                 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1865                         set_qset_lro(dev, i, 0);
1866         }
1867         return 0;
1868 }
1869
1870 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1871 {
1872         struct port_info *pi = netdev_priv(dev);
1873         struct adapter *adapter = pi->adapter;
1874         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1875
1876         e->rx_max_pending = MAX_RX_BUFFERS;
1877         e->rx_mini_max_pending = 0;
1878         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1879         e->tx_max_pending = MAX_TXQ_ENTRIES;
1880
1881         e->rx_pending = q->fl_size;
1882         e->rx_mini_pending = q->rspq_size;
1883         e->rx_jumbo_pending = q->jumbo_size;
1884         e->tx_pending = q->txq_size[0];
1885 }
1886
1887 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1888 {
1889         struct port_info *pi = netdev_priv(dev);
1890         struct adapter *adapter = pi->adapter;
1891         struct qset_params *q;
1892         int i;
1893
1894         if (e->rx_pending > MAX_RX_BUFFERS ||
1895             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1896             e->tx_pending > MAX_TXQ_ENTRIES ||
1897             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1898             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1899             e->rx_pending < MIN_FL_ENTRIES ||
1900             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1901             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1902                 return -EINVAL;
1903
1904         if (adapter->flags & FULL_INIT_DONE)
1905                 return -EBUSY;
1906
1907         q = &adapter->params.sge.qset[pi->first_qset];
1908         for (i = 0; i < pi->nqsets; ++i, ++q) {
1909                 q->rspq_size = e->rx_mini_pending;
1910                 q->fl_size = e->rx_pending;
1911                 q->jumbo_size = e->rx_jumbo_pending;
1912                 q->txq_size[0] = e->tx_pending;
1913                 q->txq_size[1] = e->tx_pending;
1914                 q->txq_size[2] = e->tx_pending;
1915         }
1916         return 0;
1917 }
1918
1919 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1920 {
1921         struct port_info *pi = netdev_priv(dev);
1922         struct adapter *adapter = pi->adapter;
1923         struct qset_params *qsp = &adapter->params.sge.qset[0];
1924         struct sge_qset *qs = &adapter->sge.qs[0];
1925
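             /*
              * Only queue set 0 is updated here.  The holdoff timer has a
              * granularity of 1/10 us, hence the * 10 conversion when
              * checking against M_NEWTIMER below.
              */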
1926         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1927                 return -EINVAL;
1928
1929         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1930         t3_update_qset_coalesce(qs, qsp);
1931         return 0;
1932 }
1933
1934 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1935 {
1936         struct port_info *pi = netdev_priv(dev);
1937         struct adapter *adapter = pi->adapter;
1938         struct qset_params *q = adapter->params.sge.qset;
1939
1940         c->rx_coalesce_usecs = q->coalesce_usecs;
1941         return 0;
1942 }
1943
1944 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1945                       u8 *data)
1946 {
1947         struct port_info *pi = netdev_priv(dev);
1948         struct adapter *adapter = pi->adapter;
1949         int i, err = 0;
1950
1951         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1952         if (!buf)
1953                 return -ENOMEM;
1954
1955         e->magic = EEPROM_MAGIC;
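             /*
              * EEPROM accesses are 32-bit aligned: read every word that
              * overlaps the requested range, then copy out just the bytes
              * the caller asked for.
              */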
1956         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1957                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1958
1959         if (!err)
1960                 memcpy(data, buf + e->offset, e->len);
1961         kfree(buf);
1962         return err;
1963 }
1964
1965 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1966                       u8 *data)
1967 {
1968         struct port_info *pi = netdev_priv(dev);
1969         struct adapter *adapter = pi->adapter;
1970         u32 aligned_offset, aligned_len;
1971         __le32 *p;
1972         u8 *buf;
1973         int err;
1974
1975         if (eeprom->magic != EEPROM_MAGIC)
1976                 return -EINVAL;
1977
1978         aligned_offset = eeprom->offset & ~3;
1979         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1980
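             /*
              * Writes go out as whole 32-bit words.  If the request is not
              * word-aligned, read the boundary words first and splice the
              * user data into a scratch buffer (read-modify-write).
              */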
1981         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1982                 buf = kmalloc(aligned_len, GFP_KERNEL);
1983                 if (!buf)
1984                         return -ENOMEM;
1985                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1986                 if (!err && aligned_len > 4)
1987                         err = t3_seeprom_read(adapter,
1988                                               aligned_offset + aligned_len - 4,
1989                                               (__le32 *)&buf[aligned_len - 4]);
1990                 if (err)
1991                         goto out;
1992                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1993         } else
1994                 buf = data;
1995
1996         err = t3_seeprom_wp(adapter, 0);
1997         if (err)
1998                 goto out;
1999
2000         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2001                 err = t3_seeprom_write(adapter, aligned_offset, *p);
2002                 aligned_offset += 4;
2003         }
2004
2005         if (!err)
2006                 err = t3_seeprom_wp(adapter, 1);
2007 out:
2008         if (buf != data)
2009                 kfree(buf);
2010         return err;
2011 }
2012
2013 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2014 {
2015         wol->supported = 0;
2016         wol->wolopts = 0;
2017         memset(&wol->sopass, 0, sizeof(wol->sopass));
2018 }
2019
2020 static const struct ethtool_ops cxgb_ethtool_ops = {
2021         .get_settings = get_settings,
2022         .set_settings = set_settings,
2023         .get_drvinfo = get_drvinfo,
2024         .get_msglevel = get_msglevel,
2025         .set_msglevel = set_msglevel,
2026         .get_ringparam = get_sge_param,
2027         .set_ringparam = set_sge_param,
2028         .get_coalesce = get_coalesce,
2029         .set_coalesce = set_coalesce,
2030         .get_eeprom_len = get_eeprom_len,
2031         .get_eeprom = get_eeprom,
2032         .set_eeprom = set_eeprom,
2033         .get_pauseparam = get_pauseparam,
2034         .set_pauseparam = set_pauseparam,
2035         .get_rx_csum = get_rx_csum,
2036         .set_rx_csum = set_rx_csum,
2037         .set_tx_csum = ethtool_op_set_tx_csum,
2038         .set_sg = ethtool_op_set_sg,
2039         .get_link = ethtool_op_get_link,
2040         .get_strings = get_strings,
2041         .phys_id = cxgb3_phys_id,
2042         .nway_reset = restart_autoneg,
2043         .get_sset_count = get_sset_count,
2044         .get_ethtool_stats = get_stats,
2045         .get_regs_len = get_regs_len,
2046         .get_regs = get_regs,
2047         .get_wol = get_wol,
2048         .set_tso = ethtool_op_set_tso,
2049 };
2050
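     /*
      * Bounds check for the extension ioctl below.  Parameters set to a
      * negative value mean "leave unchanged", so negative values are
      * accepted as in range.
      */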
2051 static int in_range(int val, int lo, int hi)
2052 {
2053         return val < 0 || (val <= hi && val >= lo);
2054 }
2055
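     /*
      * Entered via the SIOCCHIOCTL private ioctl; the first u32 of the user
      * buffer selects the sub-command.  A minimal userspace sketch (assuming
      * the exported cxgb3_ioctl.h definitions, an open socket descriptor fd
      * and the example interface name "eth0"):
      *
      *      struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
      *      struct ifreq ifr;
      *
      *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
      *      ifr.ifr_data = (void *)&edata;
      *      if (ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
      *              printf("%u queue sets\n", edata.val);
      */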
2056 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2057 {
2058         struct port_info *pi = netdev_priv(dev);
2059         struct adapter *adapter = pi->adapter;
2060         u32 cmd;
2061         int ret;
2062
2063         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2064                 return -EFAULT;
2065
2066         switch (cmd) {
2067         case CHELSIO_SET_QSET_PARAMS:{
2068                 int i;
2069                 struct qset_params *q;
2070                 struct ch_qset_params t;
2071                 int q1 = pi->first_qset;
2072                 int nqsets = pi->nqsets;
2073
2074                 if (!capable(CAP_NET_ADMIN))
2075                         return -EPERM;
2076                 if (copy_from_user(&t, useraddr, sizeof(t)))
2077                         return -EFAULT;
2078                 if (t.qset_idx >= SGE_QSETS)
2079                         return -EINVAL;
2080                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2081                     !in_range(t.cong_thres, 0, 255) ||
2082                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2083                               MAX_TXQ_ENTRIES) ||
2084                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2085                               MAX_TXQ_ENTRIES) ||
2086                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2087                               MAX_CTRL_TXQ_ENTRIES) ||
2088                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2089                               MAX_RX_BUFFERS) ||
2090                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2091                               MAX_RX_JUMBO_BUFFERS) ||
2092                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2093                               MAX_RSPQ_ENTRIES))
2094                         return -EINVAL;
2095
2096                 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
2097                         for_each_port(adapter, i) {
2098                                 pi = adap2pinfo(adapter, i);
2099                                 if (t.qset_idx >= pi->first_qset &&
2100                                     t.qset_idx < pi->first_qset + pi->nqsets &&
2101                                     !(pi->rx_offload & T3_RX_CSUM))
2102                                         return -EINVAL;
2103                         }
2104
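                     /* Ring sizes, polling mode and the congestion threshold
                      * can only be changed before the adapter has been fully
                      * initialised. */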
2105                 if ((adapter->flags & FULL_INIT_DONE) &&
2106                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2107                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2108                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2109                         t.polling >= 0 || t.cong_thres >= 0))
2110                         return -EBUSY;
2111
2112                 /* Allow setting of any available qset when offload enabled */
2113                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2114                         q1 = 0;
2115                         for_each_port(adapter, i) {
2116                                 pi = adap2pinfo(adapter, i);
2117                                 nqsets += pi->first_qset + pi->nqsets;
2118                         }
2119                 }
2120
2121                 if (t.qset_idx < q1)
2122                         return -EINVAL;
2123                 if (t.qset_idx > q1 + nqsets - 1)
2124                         return -EINVAL;
2125
2126                 q = &adapter->params.sge.qset[t.qset_idx];
2127
2128                 if (t.rspq_size >= 0)
2129                         q->rspq_size = t.rspq_size;
2130                 if (t.fl_size[0] >= 0)
2131                         q->fl_size = t.fl_size[0];
2132                 if (t.fl_size[1] >= 0)
2133                         q->jumbo_size = t.fl_size[1];
2134                 if (t.txq_size[0] >= 0)
2135                         q->txq_size[0] = t.txq_size[0];
2136                 if (t.txq_size[1] >= 0)
2137                         q->txq_size[1] = t.txq_size[1];
2138                 if (t.txq_size[2] >= 0)
2139                         q->txq_size[2] = t.txq_size[2];
2140                 if (t.cong_thres >= 0)
2141                         q->cong_thres = t.cong_thres;
2142                 if (t.intr_lat >= 0) {
2143                         struct sge_qset *qs =
2144                                 &adapter->sge.qs[t.qset_idx];
2145
2146                         q->coalesce_usecs = t.intr_lat;
2147                         t3_update_qset_coalesce(qs, q);
2148                 }
2149                 if (t.polling >= 0) {
2150                         if (adapter->flags & USING_MSIX)
2151                                 q->polling = t.polling;
2152                         else {
2153                                 /* No polling with INTx for T3A */
2154                                 if (adapter->params.rev == 0 &&
2155                                         !(adapter->flags & USING_MSI))
2156                                         t.polling = 0;
2157
2158                                 for (i = 0; i < SGE_QSETS; i++) {
2159                                         q = &adapter->params.sge.qset[i];
2161                                         q->polling = t.polling;
2162                                 }
2163                         }
2164                 }
2165                 if (t.lro >= 0)
2166                         set_qset_lro(dev, t.qset_idx, t.lro);
2167
2168                 break;
2169         }
2170         case CHELSIO_GET_QSET_PARAMS:{
2171                 struct qset_params *q;
2172                 struct ch_qset_params t;
2173                 int q1 = pi->first_qset;
2174                 int nqsets = pi->nqsets;
2175                 int i;
2176
2177                 if (copy_from_user(&t, useraddr, sizeof(t)))
2178                         return -EFAULT;
2179
2180                 /* Display qsets for all ports when offload enabled */
2181                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2182                         q1 = 0;
2183                         for_each_port(adapter, i) {
2184                                 pi = adap2pinfo(adapter, i);
2185                                 nqsets = pi->first_qset + pi->nqsets;
2186                         }
2187                 }
2188
2189                 if (t.qset_idx >= nqsets)
2190                         return -EINVAL;
2191
2192                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2193                 t.rspq_size = q->rspq_size;
2194                 t.txq_size[0] = q->txq_size[0];
2195                 t.txq_size[1] = q->txq_size[1];
2196                 t.txq_size[2] = q->txq_size[2];
2197                 t.fl_size[0] = q->fl_size;
2198                 t.fl_size[1] = q->jumbo_size;
2199                 t.polling = q->polling;
2200                 t.lro = q->lro;
2201                 t.intr_lat = q->coalesce_usecs;
2202                 t.cong_thres = q->cong_thres;
2203                 t.qnum = q1;
2204
2205                 if (adapter->flags & USING_MSIX)
2206                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2207                 else
2208                         t.vector = adapter->pdev->irq;
2209
2210                 if (copy_to_user(useraddr, &t, sizeof(t)))
2211                         return -EFAULT;
2212                 break;
2213         }
2214         case CHELSIO_SET_QSET_NUM:{
2215                 struct ch_reg edata;
2216                 unsigned int i, first_qset = 0, other_qsets = 0;
2217
2218                 if (!capable(CAP_NET_ADMIN))
2219                         return -EPERM;
2220                 if (adapter->flags & FULL_INIT_DONE)
2221                         return -EBUSY;
2222                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2223                         return -EFAULT;
2224                 if (edata.val < 1 ||
2225                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2226                         return -EINVAL;
2227
2228                 for_each_port(adapter, i)
2229                         if (adapter->port[i] && adapter->port[i] != dev)
2230                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2231
2232                 if (edata.val + other_qsets > SGE_QSETS)
2233                         return -EINVAL;
2234
2235                 pi->nqsets = edata.val;
2236
2237                 for_each_port(adapter, i)
2238                         if (adapter->port[i]) {
2239                                 pi = adap2pinfo(adapter, i);
2240                                 pi->first_qset = first_qset;
2241                                 first_qset += pi->nqsets;
2242                         }
2243                 break;
2244         }
2245         case CHELSIO_GET_QSET_NUM:{
2246                 struct ch_reg edata;
2247
2248                 edata.cmd = CHELSIO_GET_QSET_NUM;
2249                 edata.val = pi->nqsets;
2250                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2251                         return -EFAULT;
2252                 break;
2253         }
2254         case CHELSIO_LOAD_FW:{
2255                 u8 *fw_data;
2256                 struct ch_mem_range t;
2257
2258                 if (!capable(CAP_SYS_RAWIO))
2259                         return -EPERM;
2260                 if (copy_from_user(&t, useraddr, sizeof(t)))
2261                         return -EFAULT;
2262                 /* XXX: t.len is never sanity-checked; the kmalloc()
2263                  * below is its only bound. */
2263                 fw_data = kmalloc(t.len, GFP_KERNEL);
2264                 if (!fw_data)
2265                         return -ENOMEM;
2266
2267                 if (copy_from_user(fw_data, useraddr + sizeof(t),
2268                                    t.len)) {
2269                         kfree(fw_data);
2270                         return -EFAULT;
2271                 }
2272
2273                 ret = t3_load_fw(adapter, fw_data, t.len);
2274                 kfree(fw_data);
2275                 if (ret)
2276                         return ret;
2277                 break;
2278         }
2279         case CHELSIO_SETMTUTAB:{
2280                 struct ch_mtus m;
2281                 int i;
2282
2283                 if (!is_offload(adapter))
2284                         return -EOPNOTSUPP;
2285                 if (!capable(CAP_NET_ADMIN))
2286                         return -EPERM;
2287                 if (offload_running(adapter))
2288                         return -EBUSY;
2289                 if (copy_from_user(&m, useraddr, sizeof(m)))
2290                         return -EFAULT;
2291                 if (m.nmtus != NMTUS)
2292                         return -EINVAL;
2293                 if (m.mtus[0] < 81)     /* accommodate SACK */
2294                         return -EINVAL;
2295
2296                 /* MTUs must be in ascending order */
2297                 for (i = 1; i < NMTUS; ++i)
2298                         if (m.mtus[i] < m.mtus[i - 1])
2299                                 return -EINVAL;
2300
2301                 memcpy(adapter->params.mtus, m.mtus,
2302                         sizeof(adapter->params.mtus));
2303                 break;
2304         }
2305         case CHELSIO_GET_PM:{
2306                 struct tp_params *p = &adapter->params.tp;
2307                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2308
2309                 if (!is_offload(adapter))
2310                         return -EOPNOTSUPP;
2311                 m.tx_pg_sz = p->tx_pg_size;
2312                 m.tx_num_pg = p->tx_num_pgs;
2313                 m.rx_pg_sz = p->rx_pg_size;
2314                 m.rx_num_pg = p->rx_num_pgs;
2315                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2316                 if (copy_to_user(useraddr, &m, sizeof(m)))
2317                         return -EFAULT;
2318                 break;
2319         }
2320         case CHELSIO_SET_PM:{
2321                 struct ch_pm m;
2322                 struct tp_params *p = &adapter->params.tp;
2323
2324                 if (!is_offload(adapter))
2325                         return -EOPNOTSUPP;
2326                 if (!capable(CAP_NET_ADMIN))
2327                         return -EPERM;
2328                 if (adapter->flags & FULL_INIT_DONE)
2329                         return -EBUSY;
2330                 if (copy_from_user(&m, useraddr, sizeof(m)))
2331                         return -EFAULT;
2332                 if (!is_power_of_2(m.rx_pg_sz) ||
2333                         !is_power_of_2(m.tx_pg_sz))
2334                         return -EINVAL; /* not power of 2 */
2335                 if (!(m.rx_pg_sz & 0x14000))
2336                         return -EINVAL; /* not 16KB or 64KB */
2337                 if (!(m.tx_pg_sz & 0x1554000))
2338                         return -EINVAL; /* not 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */
2339                 if (m.tx_num_pg == -1)
2340                         m.tx_num_pg = p->tx_num_pgs;
2341                 if (m.rx_num_pg == -1)
2342                         m.rx_num_pg = p->rx_num_pgs;
2343                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2344                         return -EINVAL;
2345                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2346                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2347                         return -EINVAL;
2348                 p->rx_pg_size = m.rx_pg_sz;
2349                 p->tx_pg_size = m.tx_pg_sz;
2350                 p->rx_num_pgs = m.rx_num_pg;
2351                 p->tx_num_pgs = m.tx_num_pg;
2352                 break;
2353         }
2354         case CHELSIO_GET_MEM:{
2355                 struct ch_mem_range t;
2356                 struct mc7 *mem;
2357                 u64 buf[32];
2358
2359                 if (!is_offload(adapter))
2360                         return -EOPNOTSUPP;
2361                 if (!(adapter->flags & FULL_INIT_DONE))
2362                         return -EIO;    /* need the memory controllers */
2363                 if (copy_from_user(&t, useraddr, sizeof(t)))
2364                         return -EFAULT;
2365                 if ((t.addr & 7) || (t.len & 7))
2366                         return -EINVAL;
2367                 if (t.mem_id == MEM_CM)
2368                         mem = &adapter->cm;
2369                 else if (t.mem_id == MEM_PMRX)
2370                         mem = &adapter->pmrx;
2371                 else if (t.mem_id == MEM_PMTX)
2372                         mem = &adapter->pmtx;
2373                 else
2374                         return -EINVAL;
2375
2376                 /*
2377                  * Version scheme:
2378                  * bits 0..9: chip version
2379                  * bits 10..15: chip revision
2380                  */
2381                 t.version = 3 | (adapter->params.rev << 10);
2382                 if (copy_to_user(useraddr, &t, sizeof(t)))
2383                         return -EFAULT;
2384
2385                 /*
2386                  * Read 256 bytes at a time as len can be large and we don't
2387                  * want to use huge intermediate buffers.
2388                  */
2389                 useraddr += sizeof(t);  /* advance to start of buffer */
2390                 while (t.len) {
2391                         unsigned int chunk =
2392                                 min_t(unsigned int, t.len, sizeof(buf));
2393
2394                         ret = t3_mc7_bd_read(mem, t.addr / 8,
2395                                              chunk / 8, buf);
2397                         if (ret)
2398                                 return ret;
2399                         if (copy_to_user(useraddr, buf, chunk))
2400                                 return -EFAULT;
2401                         useraddr += chunk;
2402                         t.addr += chunk;
2403                         t.len -= chunk;
2404                 }
2405                 break;
2406         }
2407         case CHELSIO_SET_TRACE_FILTER:{
2408                 struct ch_trace t;
2409                 const struct trace_params *tp;
2410
2411                 if (!capable(CAP_NET_ADMIN))
2412                         return -EPERM;
2413                 if (!offload_running(adapter))
2414                         return -EAGAIN;
2415                 if (copy_from_user(&t, useraddr, sizeof(t)))
2416                         return -EFAULT;
2417
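                     /* The cast below relies on struct ch_trace, from ->sip
                      * onwards, having the same layout as
                      * struct trace_params. */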
2418                 tp = (const struct trace_params *)&t.sip;
2419                 if (t.config_tx)
2420                         t3_config_trace_filter(adapter, tp, 0,
2421                                                 t.invert_match,
2422                                                 t.trace_tx);
2423                 if (t.config_rx)
2424                         t3_config_trace_filter(adapter, tp, 1,
2425                                                 t.invert_match,
2426                                                 t.trace_rx);
2427                 break;
2428         }
2429         default:
2430                 return -EOPNOTSUPP;
2431         }
2432         return 0;
2433 }
2434
2435 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2436 {
2437         struct mii_ioctl_data *data = if_mii(req);
2438         struct port_info *pi = netdev_priv(dev);
2439         struct adapter *adapter = pi->adapter;
2440
2441         switch (cmd) {
2442         case SIOCGMIIREG:
2443         case SIOCSMIIREG:
2444                 /* Convert phy_id from older PRTAD/DEVAD format */
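                     /* Heuristic: treat the id as prtad in bits 8..12 and
                      * devad in bits 0..4 only when the unused bits (0xe0e0)
                      * are all clear. */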
2445                 if (is_10G(adapter) &&
2446                     !mdio_phy_id_is_c45(data->phy_id) &&
2447                     (data->phy_id & 0x1f00) &&
2448                     !(data->phy_id & 0xe0e0))
2449                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2450                                                        data->phy_id & 0x1f);
2451                 /* FALLTHRU */
2452         case SIOCGMIIPHY:
2453                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2454         case SIOCCHIOCTL:
2455                 return cxgb_extension_ioctl(dev, req->ifr_data);
2456         default:
2457                 return -EOPNOTSUPP;
2458         }
2459 }
2460
2461 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2462 {
2463         struct port_info *pi = netdev_priv(dev);
2464         struct adapter *adapter = pi->adapter;
2465         int ret;
2466
2467         if (new_mtu < 81)       /* accommodate SACK */
2468                 return -EINVAL;
2469         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2470                 return ret;
2471         dev->mtu = new_mtu;
2472         init_port_mtus(adapter);
2473         if (adapter->params.rev == 0 && offload_running(adapter))
2474                 t3_load_mtus(adapter, adapter->params.mtus,
2475                              adapter->params.a_wnd, adapter->params.b_wnd,
2476                              adapter->port[0]->mtu);
2477         return 0;
2478 }
2479
2480 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2481 {
2482         struct port_info *pi = netdev_priv(dev);
2483         struct adapter *adapter = pi->adapter;
2484         struct sockaddr *addr = p;
2485
2486         if (!is_valid_ether_addr(addr->sa_data))
2487                 return -EINVAL;
2488
2489         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2490         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2491         if (offload_running(adapter))
2492                 write_smt_entry(adapter, pi->port_id);
2493         return 0;
2494 }
2495
2496 /**
2497  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2498  * @adap: the adapter
2499  * @p: the port
2500  *
2501  * Ensures that current Rx processing on any of the queues associated with
2502  * the given port completes before returning.  We do this by acquiring and
2503  * releasing the locks of the response queues associated with the port.
2504  */
2505 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2506 {
2507         int i;
2508
2509         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2510                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2511
2512                 spin_lock_irq(&q->lock);
2513                 spin_unlock_irq(&q->lock);
2514         }
2515 }
2516
2517 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2518 {
2519         struct port_info *pi = netdev_priv(dev);
2520         struct adapter *adapter = pi->adapter;
2521
2522         pi->vlan_grp = grp;
2523         if (adapter->params.rev > 0)
2524                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2525         else {
2526                 /* single control for all ports */
2527                 unsigned int i, have_vlans = 0;
2528                 for_each_port(adapter, i)
2529                     have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2530
2531                 t3_set_vlan_accel(adapter, 1, have_vlans);
2532         }
2533         t3_synchronize_rx(adapter, pi);
2534 }
2535
2536 #ifdef CONFIG_NET_POLL_CONTROLLER
2537 static void cxgb_netpoll(struct net_device *dev)
2538 {
2539         struct port_info *pi = netdev_priv(dev);
2540         struct adapter *adapter = pi->adapter;
2541         int qidx;
2542
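             /*
              * Run the interrupt handler for each of this port's queue sets.
              * The cookie passed matches what was registered with
              * request_irq(): the qset for MSI-X, the adapter for INTx/MSI.
              */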
2543         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2544                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2545                 void *source;
2546
2547                 if (adapter->flags & USING_MSIX)
2548                         source = qs;
2549                 else
2550                         source = adapter;
2551
2552                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2553         }
2554 }
2555 #endif
2556
2557 /*
2558  * Periodic accumulation of MAC statistics.
2559  */
2560 static void mac_stats_update(struct adapter *adapter)
2561 {
2562         int i;
2563
2564         for_each_port(adapter, i) {
2565                 struct net_device *dev = adapter->port[i];
2566                 struct port_info *p = netdev_priv(dev);
2567
2568                 if (netif_running(dev)) {
2569                         spin_lock(&adapter->stats_lock);
2570                         t3_mac_update_stats(&p->mac);
2571                         spin_unlock(&adapter->stats_lock);
2572                 }
2573         }
2574 }
2575
2576 static void check_link_status(struct adapter *adapter)
2577 {
2578         int i;
2579
2580         for_each_port(adapter, i) {
2581                 struct net_device *dev = adapter->port[i];
2582                 struct port_info *p = netdev_priv(dev);
2583                 int link_fault;
2584
2585                 spin_lock_irq(&adapter->work_lock);
2586                 link_fault = p->link_fault;
2587                 spin_unlock_irq(&adapter->work_lock);
2588
2589                 if (link_fault) {
2590                         t3_link_fault(adapter, i);
2591                         continue;
2592                 }
2593
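             /*
              * Poll for link changes on PHYs without interrupt support,
              * with the XGMAC interrupt masked while the link state is
              * re-read so the check does not race the interrupt path.
              */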
2594                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2595                         t3_xgm_intr_disable(adapter, i);
2596                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2597
2598                         t3_link_changed(adapter, i);
2599                         t3_xgm_intr_enable(adapter, i);
2600                 }
2601         }
2602 }
2603
2604 static void check_t3b2_mac(struct adapter *adapter)
2605 {
2606         int i;
2607
2608         if (!rtnl_trylock())    /* synchronize with ifdown */
2609                 return;
2610
2611         for_each_port(adapter, i) {
2612                 struct net_device *dev = adapter->port[i];
2613                 struct port_info *p = netdev_priv(dev);
2614                 int status;
2615
2616                 if (!netif_running(dev))
2617                         continue;
2618
2619                 status = 0;
2620                 if (netif_carrier_ok(dev))
2621                         status = t3b2_mac_watchdog_task(&p->mac);
2622                 if (status == 1)
2623                         p->mac.stats.num_toggled++;
2624                 else if (status == 2) {
2625                         struct cmac *mac = &p->mac;
2626
2627                         t3_mac_set_mtu(mac, dev->mtu);
2628                         t3_mac_set_address(mac, 0, dev->dev_addr);
2629                         cxgb_set_rxmode(dev);
2630                         t3_link_start(&p->phy, mac, &p->link_config);
2631                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2632                         t3_port_intr_enable(adapter, p->port_id);
2633                         p->mac.stats.num_resets++;
2634                 }
2635         }
2636         rtnl_unlock();
2637 }
2638
2639
2640 static void t3_adap_check_task(struct work_struct *work)
2641 {
2642         struct adapter *adapter = container_of(work, struct adapter,
2643                                                adap_check_task.work);
2644         const struct adapter_params *p = &adapter->params;
2645         int port;
2646         unsigned int v, status, reset;
2647
2648         adapter->check_task_cnt++;
2649
2650         check_link_status(adapter);
2651
2652         /* Accumulate MAC stats if needed */
2653         if (!p->linkpoll_period ||
2654             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2655             p->stats_update_period) {
2656                 mac_stats_update(adapter);
2657                 adapter->check_task_cnt = 0;
2658         }
2659
2660         if (p->rev == T3_REV_B2)
2661                 check_t3b2_mac(adapter);
2662
2663         /*
2664          * Scan the XGMACs for conditions we want to monitor by periodic
2665          * polling rather than via interrupts.  These are conditions that
2666          * would otherwise flood the system with interrupts when all we
2667          * really need to know is that they are occurring.  For each
2668          * condition we count its occurrences and clear it before the
2669          * next polling loop.
2670          */
2671         for_each_port(adapter, port) {
2672                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2673                 u32 cause;
2674
2675                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2676                 reset = 0;
2677                 if (cause & F_RXFIFO_OVERFLOW) {
2678                         mac->stats.rx_fifo_ovfl++;
2679                         reset |= F_RXFIFO_OVERFLOW;
2680                 }
2681
2682                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2683         }
2684
2685         /*
2686          * We do the same as above for FL_EMPTY interrupts.
2687          */
2688         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2689         reset = 0;
2690
2691         if (status & F_FLEMPTY) {
2692                 struct sge_qset *qs = &adapter->sge.qs[0];
2693                 int i = 0;
2694
2695                 reset |= F_FLEMPTY;
2696
2697                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2698                     0xffff;
2699
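                     /* Status bits come in pairs: even bits flag free list 0
                      * and odd bits free list 1 of consecutive queue sets. */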
2700                 while (v) {
2701                         qs->fl[i].empty += (v & 1);
2702                         if (i)
2703                                 qs++;
2704                         i ^= 1;
2705                         v >>= 1;
2706                 }
2707         }
2708
2709         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2710
2711         /* Schedule the next check update if any port is active. */
2712         spin_lock_irq(&adapter->work_lock);
2713         if (adapter->open_device_map & PORT_MASK)
2714                 schedule_chk_task(adapter);
2715         spin_unlock_irq(&adapter->work_lock);
2716 }
2717
2718 /*
2719  * Processes external (PHY) interrupts in process context.
2720  */
2721 static void ext_intr_task(struct work_struct *work)
2722 {
2723         struct adapter *adapter = container_of(work, struct adapter,
2724                                                ext_intr_handler_task);
2725         int i;
2726
2727         /* Disable link fault interrupts */
2728         for_each_port(adapter, i) {
2729                 struct net_device *dev = adapter->port[i];
2730                 struct port_info *p = netdev_priv(dev);
2731
2732                 t3_xgm_intr_disable(adapter, i);
2733                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2734         }
2735
2736         /* Re-enable link fault interrupts */
2737         t3_phy_intr_handler(adapter);
2738
2739         for_each_port(adapter, i)
2740                 t3_xgm_intr_enable(adapter, i);
2741
2742         /* Now reenable external interrupts */
2743         spin_lock_irq(&adapter->work_lock);
2744         if (adapter->slow_intr_mask) {
2745                 adapter->slow_intr_mask |= F_T3DBG;
2746                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2747                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2748                              adapter->slow_intr_mask);
2749         }
2750         spin_unlock_irq(&adapter->work_lock);
2751 }
2752
2753 /*
2754  * Interrupt-context handler for external (PHY) interrupts.
2755  */
2756 void t3_os_ext_intr_handler(struct adapter *adapter)
2757 {
2758         /*
2759          * Schedule a task to handle external interrupts as they may be slow
2760          * and we use a mutex to protect MDIO registers.  We disable PHY
2761          * interrupts in the meantime and let the task reenable them when
2762          * it's done.
2763          */
2764         spin_lock(&adapter->work_lock);
2765         if (adapter->slow_intr_mask) {
2766                 adapter->slow_intr_mask &= ~F_T3DBG;
2767                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2768                              adapter->slow_intr_mask);
2769                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2770         }
2771         spin_unlock(&adapter->work_lock);
2772 }
2773
2774 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2775 {
2776         struct net_device *netdev = adapter->port[port_id];
2777         struct port_info *pi = netdev_priv(netdev);
2778
2779         spin_lock(&adapter->work_lock);
2780         pi->link_fault = 1;
2781         spin_unlock(&adapter->work_lock);
2782 }
2783
2784 static int t3_adapter_error(struct adapter *adapter, int reset)
2785 {
2786         int i, ret = 0;
2787
2788         if (is_offload(adapter) &&
2789             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2790                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2791                 offload_close(&adapter->tdev);
2792         }
2793
2794         /* Stop all ports */
2795         for_each_port(adapter, i) {
2796                 struct net_device *netdev = adapter->port[i];
2797
2798                 if (netif_running(netdev))
2799                         cxgb_close(netdev);
2800         }
2801
2802         /* Stop SGE timers */
2803         t3_stop_sge_timers(adapter);
2804
2805         adapter->flags &= ~FULL_INIT_DONE;
2806
2807         if (reset)
2808                 ret = t3_reset_adapter(adapter);
2809
2810         pci_disable_device(adapter->pdev);
2811
2812         return ret;
2813 }
2814
2815 static int t3_reenable_adapter(struct adapter *adapter)
2816 {
2817         if (pci_enable_device(adapter->pdev)) {
2818                 dev_err(&adapter->pdev->dev,
2819                         "Cannot re-enable PCI device after reset.\n");
2820                 goto err;
2821         }
2822         pci_set_master(adapter->pdev);
2823         pci_restore_state(adapter->pdev);
2824
2825         /* Free sge resources */
2826         t3_free_sge_resources(adapter);
2827
2828         if (t3_replay_prep_adapter(adapter))
2829                 goto err;
2830
2831         return 0;
2832 err:
2833         return -1;
2834 }
2835
2836 static void t3_resume_ports(struct adapter *adapter)
2837 {
2838         int i;
2839
2840         /* Restart the ports */
2841         for_each_port(adapter, i) {
2842                 struct net_device *netdev = adapter->port[i];
2843
2844                 if (netif_running(netdev)) {
2845                         if (cxgb_open(netdev)) {
2846                                 dev_err(&adapter->pdev->dev,
2847                                         "can't bring device back up"
2848                                         " after reset\n");
2849                                 continue;
2850                         }
2851                 }
2852         }
2853
2854         if (is_offload(adapter) && !ofld_disable)
2855                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2856 }
2857
2858 /*
2859  * Processes a fatal error: bring the ports down, reset the chip, then
2860  * bring the ports back up.
2861  */
2862 static void fatal_error_task(struct work_struct *work)
2863 {
2864         struct adapter *adapter = container_of(work, struct adapter,
2865                                                fatal_error_handler_task);
2866         int err = 0;
2867
2868         rtnl_lock();
2869         err = t3_adapter_error(adapter, 1);
2870         if (!err)
2871                 err = t3_reenable_adapter(adapter);
2872         if (!err)
2873                 t3_resume_ports(adapter);
2874
2875         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2876         rtnl_unlock();
2877 }
2878
2879 void t3_fatal_err(struct adapter *adapter)
2880 {
2881         unsigned int fw_status[4];
2882
2883         if (adapter->flags & FULL_INIT_DONE) {
2884                 t3_sge_stop(adapter);
2885                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2886                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2887                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2888                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2889
2890                 spin_lock(&adapter->work_lock);
2891                 t3_intr_disable(adapter);
2892                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2893                 spin_unlock(&adapter->work_lock);
2894         }
2895         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2896         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2897                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2898                          fw_status[0], fw_status[1],
2899                          fw_status[2], fw_status[3]);
2900 }
2901
2902 /**
2903  * t3_io_error_detected - called when PCI error is detected
2904  * @pdev: Pointer to PCI device
2905  * @state: The current pci connection state
2906  *
2907  * This function is called after a PCI bus error affecting
2908  * this device has been detected.
2909  */
2910 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2911                                              pci_channel_state_t state)
2912 {
2913         struct adapter *adapter = pci_get_drvdata(pdev);
2915
2916         if (state == pci_channel_io_perm_failure)
2917                 return PCI_ERS_RESULT_DISCONNECT;
2918
2919         t3_adapter_error(adapter, 0);
2920
2921         /* Request a slot reset. */
2922         return PCI_ERS_RESULT_NEED_RESET;
2923 }
2924
2925 /**
2926  * t3_io_slot_reset - called after the pci bus has been reset.
2927  * @pdev: Pointer to PCI device
2928  *
2929  * Restart the card from scratch, as if from a cold-boot.
2930  */
2931 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2932 {
2933         struct adapter *adapter = pci_get_drvdata(pdev);
2934
2935         if (!t3_reenable_adapter(adapter))
2936                 return PCI_ERS_RESULT_RECOVERED;
2937
2938         return PCI_ERS_RESULT_DISCONNECT;
2939 }
2940
2941 /**
2942  * t3_io_resume - called when traffic can start flowing again.
2943  * @pdev: Pointer to PCI device
2944  *
2945  * This callback is called when the error recovery driver tells us that
2946  * it's OK to resume normal operation.
2947  */
2948 static void t3_io_resume(struct pci_dev *pdev)
2949 {
2950         struct adapter *adapter = pci_get_drvdata(pdev);
2951
2952         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2953                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
2954
2955         t3_resume_ports(adapter);
2956 }
2957
2958 static struct pci_error_handlers t3_err_handler = {
2959         .error_detected = t3_io_error_detected,
2960         .slot_reset = t3_io_slot_reset,
2961         .resume = t3_io_resume,
2962 };
2963
2964 /*
2965  * Set the number of qsets based on the number of CPUs and the number of ports,
2966  * not to exceed the number of available qsets, assuming there are enough qsets
2967  * per port in HW.
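      *
      * For example, with two ports, 9 MSI-X vectors (8 left after
      * reserving one for non-data interrupts), 8 online CPUs and
      * SGE_QSETS == 8, nqsets starts at 8, is halved because 2 * 8
      * exceeds SGE_QSETS, and each port ends up with 4 queue sets.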
2968  */
2969 static void set_nqsets(struct adapter *adap)
2970 {
2971         int i, j = 0;
2972         int num_cpus = num_online_cpus();
2973         int hwports = adap->params.nports;
2974         int nqsets = adap->msix_nvectors - 1;
2975
2976         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2977                 if (hwports == 2 &&
2978                     (hwports * nqsets > SGE_QSETS ||
2979                      num_cpus >= nqsets / hwports))
2980                         nqsets /= hwports;
2981                 if (nqsets > num_cpus)
2982                         nqsets = num_cpus;
2983                 if (nqsets < 1 || hwports == 4)
2984                         nqsets = 1;
2985         } else {
2986                 nqsets = 1;
             }
2987
2988         for_each_port(adap, i) {
2989                 struct port_info *pi = adap2pinfo(adap, i);
2990
2991                 pi->first_qset = j;
2992                 pi->nqsets = nqsets;
2993                 j = pi->first_qset + nqsets;
2994
2995                 dev_info(&adap->pdev->dev,
2996                          "Port %d using %d queue sets.\n", i, nqsets);
2997         }
2998 }
2999
3000 static int __devinit cxgb_enable_msix(struct adapter *adap)
3001 {
3002         struct msix_entry entries[SGE_QSETS + 1];
3003         int vectors;
3004         int i, err;
3005
3006         vectors = ARRAY_SIZE(entries);
3007         for (i = 0; i < vectors; ++i)
3008                 entries[i].entry = i;
3009
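             /*
              * With this pci_enable_msix() API a positive return value is
              * the vector count that could have been allocated instead, so
              * retry with that smaller count until the call succeeds (0)
              * or fails outright (< 0).
              */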
3010         while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
3011                 vectors = err;
3012
3013         if (err < 0)
3014                 pci_disable_msix(adap->pdev);
3015
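             /*
              * Require at least one vector per port plus one for
              * asynchronous (non-data) events.
              */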
3016         if (!err && vectors < (adap->params.nports + 1)) {
3017                 pci_disable_msix(adap->pdev);
3018                 err = -ENOSPC;
3019         }
3020
3021         if (!err) {
3022                 for (i = 0; i < vectors; ++i)
3023                         adap->msix_info[i].vec = entries[i].vector;
3024                 adap->msix_nvectors = vectors;
3025         }
3026
3027         return err;
3028 }
3029
3030 static void __devinit print_port_info(struct adapter *adap,
3031                                       const struct adapter_info *ai)
3032 {
3033         static const char *pci_variant[] = {
3034                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3035         };
3036
3037         int i;
3038         char buf[80];
3039
3040         if (is_pcie(adap))
3041                 snprintf(buf, sizeof(buf), "%s x%d",
3042                          pci_variant[adap->params.pci.variant],
3043                          adap->params.pci.width);
3044         else
3045                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3046                          pci_variant[adap->params.pci.variant],
3047                          adap->params.pci.speed, adap->params.pci.width);
3048
3049         for_each_port(adap, i) {
3050                 struct net_device *dev = adap->port[i];
3051                 const struct port_info *pi = netdev_priv(dev);
3052
3053                 if (!test_bit(i, &adap->registered_device_map))
3054                         continue;
3055                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
3056                        dev->name, ai->desc, pi->phy.desc,
3057                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
3058                        (adap->flags & USING_MSIX) ? " MSI-X" :
3059                        (adap->flags & USING_MSI) ? " MSI" : "");
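                     /*
                      * adap->name aliases the name of the first successfully
                      * registered netdev (see init_one()), so this pointer
                      * comparison prints the memory banner exactly once.
                      */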
3060                 if (adap->name == dev->name && adap->params.vpd.mclk)
3061                         printk(KERN_INFO
3062                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3063                                adap->name, t3_mc7_size(&adap->cm) >> 20,
3064                                t3_mc7_size(&adap->pmtx) >> 20,
3065                                t3_mc7_size(&adap->pmrx) >> 20,
3066                                adap->params.vpd.sn);
3067         }
3068 }
3069
3070 static const struct net_device_ops cxgb_netdev_ops = {
3071         .ndo_open               = cxgb_open,
3072         .ndo_stop               = cxgb_close,
3073         .ndo_start_xmit         = t3_eth_xmit,
3074         .ndo_get_stats          = cxgb_get_stats,
3075         .ndo_validate_addr      = eth_validate_addr,
3076         .ndo_set_multicast_list = cxgb_set_rxmode,
3077         .ndo_do_ioctl           = cxgb_ioctl,
3078         .ndo_change_mtu         = cxgb_change_mtu,
3079         .ndo_set_mac_address    = cxgb_set_mac_addr,
3080         .ndo_vlan_rx_register   = vlan_rx_register,
3081 #ifdef CONFIG_NET_POLL_CONTROLLER
3082         .ndo_poll_controller    = cxgb_netpoll,
3083 #endif
3084 };
3085
3086 static int __devinit init_one(struct pci_dev *pdev,
3087                               const struct pci_device_id *ent)
3088 {
3089         static int version_printed;
3090
3091         int i, err, pci_using_dac = 0;
3092         resource_size_t mmio_start, mmio_len;
3093         const struct adapter_info *ai;
3094         struct adapter *adapter = NULL;
3095         struct port_info *pi;
3096
3097         if (!version_printed) {
3098                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3099                 ++version_printed;
3100         }
3101
3102         if (!cxgb3_wq) {
3103                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3104                 if (!cxgb3_wq) {
3105                         printk(KERN_ERR DRV_NAME
3106                                ": cannot initialize work queue\n");
3107                         return -ENOMEM;
3108                 }
3109         }
3110
3111         err = pci_request_regions(pdev, DRV_NAME);
3112         if (err) {
3113                 /* Just info, some other driver may have claimed the device. */
3114                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3115                 return err;
3116         }
3117
3118         err = pci_enable_device(pdev);
3119         if (err) {
3120                 dev_err(&pdev->dev, "cannot enable PCI device\n");
3121                 goto out_release_regions;
3122         }
3123
3124         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3125                 pci_using_dac = 1;
3126                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3127                 if (err) {
3128                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3129                                "coherent allocations\n");
3130                         goto out_disable_device;
3131                 }
3132         } else {
                     err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                     if (err) {
3133                         dev_err(&pdev->dev, "no usable DMA configuration\n");
3134                         goto out_disable_device;
                     }
3135         }
3136
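             /*
              * Enable bus mastering and save the PCI config space so the
              * error recovery path can restore it after a slot reset.
              */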
3137         pci_set_master(pdev);
3138         pci_save_state(pdev);
3139
3140         mmio_start = pci_resource_start(pdev, 0);
3141         mmio_len = pci_resource_len(pdev, 0);
3142         ai = t3_get_adapter_info(ent->driver_data);
3143
3144         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3145         if (!adapter) {
3146                 err = -ENOMEM;
3147                 goto out_disable_device;
3148         }
3149
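             /*
              * Pre-allocate an skb large enough for a CPL_SET_TCB_FIELD
              * message so one can still be sent if a later atomic
              * allocation fails.
              */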
3150         adapter->nofail_skb =
3151                 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3152         if (!adapter->nofail_skb) {
3153                 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3154                 err = -ENOMEM;
3155                 goto out_free_adapter;
3156         }
3157
3158         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3159         if (!adapter->regs) {
3160                 dev_err(&pdev->dev, "cannot map device registers\n");
3161                 err = -ENOMEM;
3162                 goto out_free_adapter;
3163         }
3164
3165         adapter->pdev = pdev;
3166         adapter->name = pci_name(pdev);
3167         adapter->msg_enable = dflt_msg_enable;
3168         adapter->mmio_len = mmio_len;
3169
3170         mutex_init(&adapter->mdio_lock);
3171         spin_lock_init(&adapter->work_lock);
3172         spin_lock_init(&adapter->stats_lock);
3173
3174         INIT_LIST_HEAD(&adapter->adapter_list);
3175         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3176         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3177         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3178
3179         for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3180                 struct net_device *netdev;
3181
3182                 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3183                 if (!netdev) {
3184                         err = -ENOMEM;
3185                         goto out_free_dev;
3186                 }
3187
3188                 SET_NETDEV_DEV(netdev, &pdev->dev);
3189
3190                 adapter->port[i] = netdev;
3191                 pi = netdev_priv(netdev);
3192                 pi->adapter = adapter;
3193                 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3194                 pi->port_id = i;
3195                 netif_carrier_off(netdev);
3196                 netif_tx_stop_all_queues(netdev);
3197                 netdev->irq = pdev->irq;
3198                 netdev->mem_start = mmio_start;
3199                 netdev->mem_end = mmio_start + mmio_len - 1;
3200                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3201                 netdev->features |= NETIF_F_GRO;
3202                 if (pci_using_dac)
3203                         netdev->features |= NETIF_F_HIGHDMA;
3204
3205                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3206                 netdev->netdev_ops = &cxgb_netdev_ops;
3207                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3208         }
3209
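             /*
              * Publish the adapter in drvdata, then run the one-time HW
              * initialization: t3_prep_adapter() reads the VPD, optionally
              * resets the chip (third argument) and sets up the PHYs.
              */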
3210         pci_set_drvdata(pdev, adapter);
3211         if (t3_prep_adapter(adapter, ai, 1) < 0) {
3212                 err = -ENODEV;
3213                 goto out_free_dev;
3214         }
3215
3216         /*
3217          * The card is now ready to go.  If any errors occur during device
3218          * registration, we do not fail the whole card but rather proceed only
3219          * with the ports we manage to register successfully.  However, we must
3220          * register at least one net device.
3221          */
3222         for_each_port(adapter, i) {
3223                 err = register_netdev(adapter->port[i]);
3224                 if (err)
3225                         dev_warn(&pdev->dev,
3226                                  "cannot register net device %s, skipping\n",
3227                                  adapter->port[i]->name);
3228                 else {
3229                         /*
3230                          * Change the name we use for messages to the name of
3231                          * the first successfully registered interface.
3232                          */
3233                         if (!adapter->registered_device_map)
3234                                 adapter->name = adapter->port[i]->name;
3235
3236                         __set_bit(i, &adapter->registered_device_map);
3237                 }
3238         }
3239         if (!adapter->registered_device_map) {
3240                 dev_err(&pdev->dev, "could not register any net devices\n");
3241                 goto out_free_dev;
3242         }
3243
3244         /* Driver's ready. Reflect it on LEDs */
3245         t3_led_ready(adapter);
3246
3247         if (is_offload(adapter)) {
3248                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3249                 cxgb3_adapter_ofld(adapter);
3250         }
3251
3252         /* See what interrupts we'll be using */
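             /* The msi parameter: >1 try MSI-X first, >0 allow MSI, 0 INTx. */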
3253         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3254                 adapter->flags |= USING_MSIX;
3255         else if (msi > 0 && pci_enable_msi(pdev) == 0)
3256                 adapter->flags |= USING_MSI;
3257
3258         set_nqsets(adapter);
3259
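             /*
              * The sysfs attribute group is optional; a failure to create
              * it is not treated as fatal, so err is left unchecked.
              */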
3260         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3261                                  &cxgb3_attr_group);
3262
3263         print_port_info(adapter, ai);
3264         return 0;
3265
3266 out_free_dev:
3267         iounmap(adapter->regs);
3268         for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3269                 if (adapter->port[i])
3270                         free_netdev(adapter->port[i]);
3271
3272 out_free_adapter:
3273         kfree(adapter);
3274
3275 out_disable_device:
3276         pci_disable_device(pdev);
3277 out_release_regions:
3278         pci_release_regions(pdev);
3279         pci_set_drvdata(pdev, NULL);
3280         return err;
3281 }
3282
3283 static void __devexit remove_one(struct pci_dev *pdev)
3284 {
3285         struct adapter *adapter = pci_get_drvdata(pdev);
3286
3287         if (adapter) {
3288                 int i;
3289
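                     /* Quiesce the SGE so no DMA is in flight during teardown. */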
3290                 t3_sge_stop(adapter);
3291                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3292                                    &cxgb3_attr_group);
3293
3294                 if (is_offload(adapter)) {
3295                         cxgb3_adapter_unofld(adapter);
3296                         if (test_bit(OFFLOAD_DEVMAP_BIT,
3297                                      &adapter->open_device_map))
3298                                 offload_close(&adapter->tdev);
3299                 }
3300
3301                 for_each_port(adapter, i)
3302                         if (test_bit(i, &adapter->registered_device_map))
3303                                 unregister_netdev(adapter->port[i]);
3304
3305                 t3_stop_sge_timers(adapter);
3306                 t3_free_sge_resources(adapter);
3307                 cxgb_disable_msi(adapter);
3308
3309                 for_each_port(adapter, i)
3310                         if (adapter->port[i])
3311                                 free_netdev(adapter->port[i]);
3312
3313                 iounmap(adapter->regs);
3314                 kfree_skb(adapter->nofail_skb);
3316                 kfree(adapter);
3317                 pci_release_regions(pdev);
3318                 pci_disable_device(pdev);
3319                 pci_set_drvdata(pdev, NULL);
3320         }
3321 }
3322
3323 static struct pci_driver driver = {
3324         .name = DRV_NAME,
3325         .id_table = cxgb3_pci_tbl,
3326         .probe = init_one,
3327         .remove = __devexit_p(remove_one),
3328         .err_handler = &t3_err_handler,
3329 };
3330
3331 static int __init cxgb3_init_module(void)
3332 {
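             /*
              * Initialize the offload layer before registering the PCI
              * driver so the probe path can hand adapters to it.
              */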
3333         cxgb3_offload_init();
3334
3335         return pci_register_driver(&driver);
3339 }
3340
3341 static void __exit cxgb3_cleanup_module(void)
3342 {
3343         pci_unregister_driver(&driver);
3344         if (cxgb3_wq)
3345                 destroy_workqueue(cxgb3_wq);
3346 }
3347
3348 module_init(cxgb3_init_module);
3349 module_exit(cxgb3_cleanup_module);