/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
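
/*
 * Example usage (illustrative): loading the driver with "modprobe cxgb3
 * msi=0" forces legacy INTx interrupts, which can help when debugging a
 * platform with broken MSI/MSI-X routing; the default msi=2 lets the
 * driver pick the best scheme the platform supports.
 */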

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);
                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);
                        pi->phy.ops->power_down(&pi->phy, 1);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the module change
 *      @port_id: the port index whose PHY module changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
        else
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

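/*
 * MSI-X vector 0 carries slow-path/async events (t3_async_intr_handler);
 * the data interrupt for SGE queue set i uses vector i + 1.  This matches
 * the names assigned in name_msix_vecs(), e.g. "eth0-0" (device name
 * illustrative) for the first queue set of the first port.
 */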
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

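/*
 * Prime the TP's parity-protected lookup memories by writing every entry
 * once: 16 SMT entries, 2048 L2T entries, 2048 routing-table entries, and
 * one TCB field, i.e. the 16 + 2048 + 2048 + 1 replies awaited below.
 */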
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        t3_tp_set_offload_mode(adap, 0);
        return i;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
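
/*
 * Worked example (illustrative): with nq0 = 2 and nq1 = 2, the first half
 * of rspq_map becomes 0,1,0,1,... and the second half 2,3,2,3,..., so RSS
 * hashes for port 0 spread over queue sets 0-1 and those for port 1 over
 * queue sets 2-3.
 */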

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      set_qset_lro - Turn a queue set's LRO capability on and off
 *      @dev: the device the qset is attached to
 *      @qset_idx: the queue set index
 *      @val: the LRO switch
 *
 *      Sets LRO on or off for a particular queue set.  The device's
 *      features flag is updated to reflect the LRO capability when all
 *      queues belonging to the device are in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
                     ++j, ++qset_idx) {
                        set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_stop_sge_timers(adap);
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}
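
/*
 * Rate arithmetic, with illustrative numbers: cclk (core clock, in kHz)
 * times 1000 divided by cpt gives credit updates per second; each update
 * grants bpt bytes, and dividing bytes/s by 125 yields Kbps.  E.g.
 * cclk = 200000, cpt = 200, bpt = 64 gives 1,000,000 updates/s * 64 B
 * / 125 = 512000 Kbps.
 */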

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
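
/*
 * These attributes are registered via offload_attr_group below and appear
 * in sysfs under the port's net device.  Example usage (path illustrative):
 * "echo 100000 > /sys/class/net/eth0/sched0" asks t3_config_sched() to set
 * Tx scheduler 0 to 100000 Kbps; reading the file back reports the current
 * rate or "disabled".
 */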

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
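
/*
 * Both port MTUs are packed into the single A_TP_MTU_PORT_TABLE register
 * above: port 0 in bits 15:0, port 1 in bits 31:16.  With default
 * 1500-byte MTUs the register value would be 0x05dc05dc (illustrative).
 */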

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);

        return ret;
}

static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                err = bind_qsets(adap);
                if (err) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", err);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        dev->real_num_tx_queues = pi->nqsets;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
                tot += adapter->sge.qs[i].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = 0;
        *data++ = 0;
        *data++ = 0;
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}
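
/*
 * Illustrative call: reg_block_dump(ap, buf, 0x100, 0x108) (addresses
 * hypothetical) reads the three 32-bit registers at 0x100, 0x104 and
 * 0x108 and stores them at buf + 0x100, so each register lands at its
 * own address offset within the dump buffer.
 */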

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
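
        /*
         * Example: a revision-2 PCIe adapter yields
         * 3 | (2 << 10) | (1 << 31) = 0x80000803.
         */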
1458
1459         /*
1460          * We skip the MAC statistics registers because they are clear-on-read.
1461          * Also reading multi-register stats would need to synchronize with the
1462          * periodic mac stats accumulation.  Hard to justify the complexity.
1463          */
1464         memset(buf, 0, T3_REGMAP_SIZE);
1465         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1466         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1467         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1468         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1469         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1470         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1471                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1472         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1473                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1474 }
1475
1476 static int restart_autoneg(struct net_device *dev)
1477 {
1478         struct port_info *p = netdev_priv(dev);
1479
1480         if (!netif_running(dev))
1481                 return -EAGAIN;
1482         if (p->link_config.autoneg != AUTONEG_ENABLE)
1483                 return -EINVAL;
1484         p->phy.ops->autoneg_restart(&p->phy);
1485         return 0;
1486 }
1487
1488 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1489 {
1490         struct port_info *pi = netdev_priv(dev);
1491         struct adapter *adapter = pi->adapter;
1492         int i;
1493
1494         if (data == 0)
1495                 data = 2;
1496
1497         for (i = 0; i < data * 2; i++) {
1498                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1499                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1500                 if (msleep_interruptible(500))
1501                         break;
1502         }
1503         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1504                          F_GPIO0_OUT_VAL);
1505         return 0;
1506 }
1507
1508 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1509 {
1510         struct port_info *p = netdev_priv(dev);
1511
1512         cmd->supported = p->link_config.supported;
1513         cmd->advertising = p->link_config.advertising;
1514
1515         if (netif_carrier_ok(dev)) {
1516                 cmd->speed = p->link_config.speed;
1517                 cmd->duplex = p->link_config.duplex;
1518         } else {
1519                 cmd->speed = -1;
1520                 cmd->duplex = -1;
1521         }
1522
1523         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1524         cmd->phy_address = p->phy.addr;
1525         cmd->transceiver = XCVR_EXTERNAL;
1526         cmd->autoneg = p->link_config.autoneg;
1527         cmd->maxtxpkt = 0;
1528         cmd->maxrxpkt = 0;
1529         return 0;
1530 }
1531
1532 static int speed_duplex_to_caps(int speed, int duplex)
1533 {
1534         int cap = 0;
1535
1536         switch (speed) {
1537         case SPEED_10:
1538                 if (duplex == DUPLEX_FULL)
1539                         cap = SUPPORTED_10baseT_Full;
1540                 else
1541                         cap = SUPPORTED_10baseT_Half;
1542                 break;
1543         case SPEED_100:
1544                 if (duplex == DUPLEX_FULL)
1545                         cap = SUPPORTED_100baseT_Full;
1546                 else
1547                         cap = SUPPORTED_100baseT_Half;
1548                 break;
1549         case SPEED_1000:
1550                 if (duplex == DUPLEX_FULL)
1551                         cap = SUPPORTED_1000baseT_Full;
1552                 else
1553                         cap = SUPPORTED_1000baseT_Half;
1554                 break;
1555         case SPEED_10000:
1556                 if (duplex == DUPLEX_FULL)
1557                         cap = SUPPORTED_10000baseT_Full;
1558         }
1559         return cap;
1560 }
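
/*
 * Worked example: speed_duplex_to_caps(SPEED_100, DUPLEX_HALF) returns
 * SUPPORTED_100baseT_Half, whereas an unsupportable combination such as
 * SPEED_10000 with DUPLEX_HALF falls through and returns 0, which callers
 * treat as "not supported".
 */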
1561
1562 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1563                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1564                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1565                       ADVERTISED_10000baseT_Full)
1566
1567 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1568 {
1569         struct port_info *p = netdev_priv(dev);
1570         struct link_config *lc = &p->link_config;
1571
1572         if (!(lc->supported & SUPPORTED_Autoneg)) {
1573                 /*
1574                  * PHY offers a single speed/duplex.  See if that's what's
1575                  * being requested.
1576                  */
1577                 if (cmd->autoneg == AUTONEG_DISABLE) {
1578                         int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1579                         if (lc->supported & cap)
1580                                 return 0;
1581                 }
1582                 return -EINVAL;
1583         }
1584
1585         if (cmd->autoneg == AUTONEG_DISABLE) {
1586                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1587                 /* 1000BASE-T requires autoneg (master/slave resolution) */
1588                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1589                         return -EINVAL;
1590                 lc->requested_speed = cmd->speed;
1591                 lc->requested_duplex = cmd->duplex;
1592                 lc->advertising = 0;
1593         } else {
1594                 cmd->advertising &= ADVERTISED_MASK;
1595                 cmd->advertising &= lc->supported;
1596                 if (!cmd->advertising)
1597                         return -EINVAL;
1598                 lc->requested_speed = SPEED_INVALID;
1599                 lc->requested_duplex = DUPLEX_INVALID;
1600                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1601         }
1602         lc->autoneg = cmd->autoneg;
1603         if (netif_running(dev))
1604                 t3_link_start(&p->phy, &p->mac, lc);
1605         return 0;
1606 }
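
/*
 * Illustrative requests this handler accepts (interface name assumed):
 *
 *	ethtool -s eth0 autoneg on advertise 0x020   # 1000baseT/Full only
 *	ethtool -s eth0 autoneg off speed 100 duplex full
 *
 * A forced "speed 1000" is rejected above since 1000BASE-T needs
 * autonegotiation for master/slave resolution.
 */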
1607
1608 static void get_pauseparam(struct net_device *dev,
1609                            struct ethtool_pauseparam *epause)
1610 {
1611         struct port_info *p = netdev_priv(dev);
1612
1613         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1614         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1615         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1616 }
1617
1618 static int set_pauseparam(struct net_device *dev,
1619                           struct ethtool_pauseparam *epause)
1620 {
1621         struct port_info *p = netdev_priv(dev);
1622         struct link_config *lc = &p->link_config;
1623
1624         if (epause->autoneg == AUTONEG_DISABLE)
1625                 lc->requested_fc = 0;
1626         else if (lc->supported & SUPPORTED_Autoneg)
1627                 lc->requested_fc = PAUSE_AUTONEG;
1628         else
1629                 return -EINVAL;
1630
1631         if (epause->rx_pause)
1632                 lc->requested_fc |= PAUSE_RX;
1633         if (epause->tx_pause)
1634                 lc->requested_fc |= PAUSE_TX;
1635         if (lc->autoneg == AUTONEG_ENABLE) {
1636                 if (netif_running(dev))
1637                         t3_link_start(&p->phy, &p->mac, lc);
1638         } else {
1639                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1640                 if (netif_running(dev))
1641                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1642         }
1643         return 0;
1644 }
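
/*
 * Illustrative usage: "ethtool -A eth0 autoneg off rx on tx off".  When the
 * link is not autonegotiating, the else-branch above programs the MAC
 * immediately, passing -1 for speed and duplex so that only the
 * flow-control configuration changes.
 */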
1645
1646 static u32 get_rx_csum(struct net_device *dev)
1647 {
1648         struct port_info *p = netdev_priv(dev);
1649
1650         return p->rx_offload & T3_RX_CSUM;
1651 }
1652
1653 static int set_rx_csum(struct net_device *dev, u32 data)
1654 {
1655         struct port_info *p = netdev_priv(dev);
1656
1657         if (data) {
1658                 p->rx_offload |= T3_RX_CSUM;
1659         } else {
1660                 int i;
1661
1662                 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1663                 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1664                         set_qset_lro(dev, i, 0);
1665         }
1666         return 0;
1667 }
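
/*
 * Note that clearing Rx checksum offload above also disables LRO on all of
 * the port's qsets; LRO aggregates only frames whose checksums the hardware
 * has already verified, so it cannot stay enabled without T3_RX_CSUM.
 */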
1668
1669 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1670 {
1671         struct port_info *pi = netdev_priv(dev);
1672         struct adapter *adapter = pi->adapter;
1673         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1674
1675         e->rx_max_pending = MAX_RX_BUFFERS;
1676         e->rx_mini_max_pending = 0;
1677         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1678         e->tx_max_pending = MAX_TXQ_ENTRIES;
1679
1680         e->rx_pending = q->fl_size;
1681         e->rx_mini_pending = q->rspq_size;
1682         e->rx_jumbo_pending = q->jumbo_size;
1683         e->tx_pending = q->txq_size[0];
1684 }
1685
1686 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1687 {
1688         struct port_info *pi = netdev_priv(dev);
1689         struct adapter *adapter = pi->adapter;
1690         struct qset_params *q;
1691         int i;
1692
1693         if (e->rx_pending > MAX_RX_BUFFERS ||
1694             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1695             e->tx_pending > MAX_TXQ_ENTRIES ||
1696             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1697             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1698             e->rx_pending < MIN_FL_ENTRIES ||
1699             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1700             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1701                 return -EINVAL;
1702
1703         if (adapter->flags & FULL_INIT_DONE)
1704                 return -EBUSY;
1705
1706         q = &adapter->params.sge.qset[pi->first_qset];
1707         for (i = 0; i < pi->nqsets; ++i, ++q) {
1708                 q->rspq_size = e->rx_mini_pending;
1709                 q->fl_size = e->rx_pending;
1710                 q->jumbo_size = e->rx_jumbo_pending;
1711                 q->txq_size[0] = e->tx_pending;
1712                 q->txq_size[1] = e->tx_pending;
1713                 q->txq_size[2] = e->tx_pending;
1714         }
1715         return 0;
1716 }
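
/*
 * Illustrative usage: "ethtool -G eth0 rx 2048 tx 1024" resizes the free
 * list and all three Tx queues of every qset owned by the port.  Once the
 * adapter is fully initialized (FULL_INIT_DONE) the request fails with
 * -EBUSY, so ring sizes have to be chosen before the first ifup.
 */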
1717
1718 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1719 {
1720         struct port_info *pi = netdev_priv(dev);
1721         struct adapter *adapter = pi->adapter;
1722         struct qset_params *qsp = &adapter->params.sge.qset[0];
1723         struct sge_qset *qs = &adapter->sge.qs[0];
1724
1725         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1726                 return -EINVAL;
1727
1728         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1729         t3_update_qset_coalesce(qs, qsp);
1730         return 0;
1731 }
1732
1733 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1734 {
1735         struct port_info *pi = netdev_priv(dev);
1736         struct adapter *adapter = pi->adapter;
1737         struct qset_params *q = adapter->params.sge.qset;
1738
1739         c->rx_coalesce_usecs = q->coalesce_usecs;
1740         return 0;
1741 }
1742
1743 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1744                       u8 *data)
1745 {
1746         struct port_info *pi = netdev_priv(dev);
1747         struct adapter *adapter = pi->adapter;
1748         int i, err = 0;
1749
1750         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1751         if (!buf)
1752                 return -ENOMEM;
1753
1754         e->magic = EEPROM_MAGIC;
1755         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1756                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1757
1758         if (!err)
1759                 memcpy(data, buf + e->offset, e->len);
1760         kfree(buf);
1761         return err;
1762 }
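
/*
 * The EEPROM is accessed in 4-byte words, so the loop above rounds the
 * start down and reads through the end as needed.  Worked example:
 * offset = 6, len = 3 reads the words at 4 and 8 into buf and then copies
 * bytes 6..8 back to the caller.
 */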
1763
1764 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1765                       u8 *data)
1766 {
1767         struct port_info *pi = netdev_priv(dev);
1768         struct adapter *adapter = pi->adapter;
1769         u32 aligned_offset, aligned_len;
1770         __le32 *p;
1771         u8 *buf;
1772         int err;
1773
1774         if (eeprom->magic != EEPROM_MAGIC)
1775                 return -EINVAL;
1776
1777         aligned_offset = eeprom->offset & ~3;
1778         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1779
1780         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1781                 buf = kmalloc(aligned_len, GFP_KERNEL);
1782                 if (!buf)
1783                         return -ENOMEM;
1784                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1785                 if (!err && aligned_len > 4)
1786                         err = t3_seeprom_read(adapter,
1787                                               aligned_offset + aligned_len - 4,
1788                                               (__le32 *)&buf[aligned_len - 4]);
1789                 if (err)
1790                         goto out;
1791                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1792         } else
1793                 buf = data;
1794
1795         err = t3_seeprom_wp(adapter, 0);
1796         if (err)
1797                 goto out;
1798
1799         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1800                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1801                 aligned_offset += 4;
1802         }
1803
1804         if (!err)
1805                 err = t3_seeprom_wp(adapter, 1);
1806 out:
1807         if (buf != data)
1808                 kfree(buf);
1809         return err;
1810 }
1811
1812 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1813 {
1814         wol->supported = 0;
1815         wol->wolopts = 0;
1816         memset(&wol->sopass, 0, sizeof(wol->sopass));
1817 }
1818
1819 static const struct ethtool_ops cxgb_ethtool_ops = {
1820         .get_settings = get_settings,
1821         .set_settings = set_settings,
1822         .get_drvinfo = get_drvinfo,
1823         .get_msglevel = get_msglevel,
1824         .set_msglevel = set_msglevel,
1825         .get_ringparam = get_sge_param,
1826         .set_ringparam = set_sge_param,
1827         .get_coalesce = get_coalesce,
1828         .set_coalesce = set_coalesce,
1829         .get_eeprom_len = get_eeprom_len,
1830         .get_eeprom = get_eeprom,
1831         .set_eeprom = set_eeprom,
1832         .get_pauseparam = get_pauseparam,
1833         .set_pauseparam = set_pauseparam,
1834         .get_rx_csum = get_rx_csum,
1835         .set_rx_csum = set_rx_csum,
1836         .set_tx_csum = ethtool_op_set_tx_csum,
1837         .set_sg = ethtool_op_set_sg,
1838         .get_link = ethtool_op_get_link,
1839         .get_strings = get_strings,
1840         .phys_id = cxgb3_phys_id,
1841         .nway_reset = restart_autoneg,
1842         .get_sset_count = get_sset_count,
1843         .get_ethtool_stats = get_stats,
1844         .get_regs_len = get_regs_len,
1845         .get_regs = get_regs,
1846         .get_wol = get_wol,
1847         .set_tso = ethtool_op_set_tso,
1848 };
1849
1850 static int in_range(int val, int lo, int hi)
1851 {
1852         return val < 0 || (val <= hi && val >= lo);
1853 }
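
/*
 * Note the convention: a negative value means "parameter not supplied" and
 * always passes, e.g. in_range(-1, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES) == 1,
 * which lets the qset ioctl below skip fields the user left unset.
 */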
1854
1855 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1856 {
1857         struct port_info *pi = netdev_priv(dev);
1858         struct adapter *adapter = pi->adapter;
1859         u32 cmd;
1860         int ret;
1861
1862         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1863                 return -EFAULT;
1864
1865         switch (cmd) {
1866         case CHELSIO_SET_QSET_PARAMS:{
1867                 int i;
1868                 struct qset_params *q;
1869                 struct ch_qset_params t;
1870                 int q1 = pi->first_qset;
1871                 int nqsets = pi->nqsets;
1872
1873                 if (!capable(CAP_NET_ADMIN))
1874                         return -EPERM;
1875                 if (copy_from_user(&t, useraddr, sizeof(t)))
1876                         return -EFAULT;
1877                 if (t.qset_idx >= SGE_QSETS)
1878                         return -EINVAL;
1879                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1880                         !in_range(t.cong_thres, 0, 255) ||
1881                         !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1882                                 MAX_TXQ_ENTRIES) ||
1883                         !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1884                                 MAX_TXQ_ENTRIES) ||
1885                         !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1886                                 MAX_CTRL_TXQ_ENTRIES) ||
1887                         !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1888                                 MAX_RX_BUFFERS) ||
1889                         !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1890                                 MAX_RX_JUMBO_BUFFERS) ||
1891                         !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1892                                 MAX_RSPQ_ENTRIES))
1893                         return -EINVAL;
1894
1895                 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1896                         for_each_port(adapter, i) {
1897                                 pi = adap2pinfo(adapter, i);
1898                                 if (t.qset_idx >= pi->first_qset &&
1899                                     t.qset_idx < pi->first_qset + pi->nqsets &&
1900                                     !(pi->rx_offload & T3_RX_CSUM))
1901                                         return -EINVAL;
1902                         }
1903
1904                 if ((adapter->flags & FULL_INIT_DONE) &&
1905                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1906                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1907                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1908                         t.polling >= 0 || t.cong_thres >= 0))
1909                         return -EBUSY;
1910
1911                 /* Allow setting of any available qset when offload enabled */
1912                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1913                         q1 = 0;
1914                         for_each_port(adapter, i) {
1915                                 pi = adap2pinfo(adapter, i);
1916                                 nqsets += pi->first_qset + pi->nqsets;
1917                         }
1918                 }
1919
1920                 if (t.qset_idx < q1)
1921                         return -EINVAL;
1922                 if (t.qset_idx > q1 + nqsets - 1)
1923                         return -EINVAL;
1924
1925                 q = &adapter->params.sge.qset[t.qset_idx];
1926
1927                 if (t.rspq_size >= 0)
1928                         q->rspq_size = t.rspq_size;
1929                 if (t.fl_size[0] >= 0)
1930                         q->fl_size = t.fl_size[0];
1931                 if (t.fl_size[1] >= 0)
1932                         q->jumbo_size = t.fl_size[1];
1933                 if (t.txq_size[0] >= 0)
1934                         q->txq_size[0] = t.txq_size[0];
1935                 if (t.txq_size[1] >= 0)
1936                         q->txq_size[1] = t.txq_size[1];
1937                 if (t.txq_size[2] >= 0)
1938                         q->txq_size[2] = t.txq_size[2];
1939                 if (t.cong_thres >= 0)
1940                         q->cong_thres = t.cong_thres;
1941                 if (t.intr_lat >= 0) {
1942                         struct sge_qset *qs =
1943                                 &adapter->sge.qs[t.qset_idx];
1944
1945                         q->coalesce_usecs = t.intr_lat;
1946                         t3_update_qset_coalesce(qs, q);
1947                 }
1948                 if (t.polling >= 0) {
1949                         if (adapter->flags & USING_MSIX)
1950                                 q->polling = t.polling;
1951                         else {
1952                                 /* No polling with INTx for T3A */
1953                                 if (adapter->params.rev == 0 &&
1954                                         !(adapter->flags & USING_MSI))
1955                                         t.polling = 0;
1956
1957                                 for (i = 0; i < SGE_QSETS; i++) {
1958                                         q =
1959                                                 &adapter->params.sge.qset[i];
1960                                         q->polling = t.polling;
1961                                 }
1962                         }
1963                 }
1964                 if (t.lro >= 0)
1965                         set_qset_lro(dev, t.qset_idx, t.lro);
1966
1967                 break;
1968         }
1969         case CHELSIO_GET_QSET_PARAMS:{
1970                 struct qset_params *q;
1971                 struct ch_qset_params t;
1972                 int q1 = pi->first_qset;
1973                 int nqsets = pi->nqsets;
1974                 int i;
1975
1976                 if (copy_from_user(&t, useraddr, sizeof(t)))
1977                         return -EFAULT;
1978
1979                 /* Display qsets for all ports when offload enabled */
1980                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1981                         q1 = 0;
1982                         for_each_port(adapter, i) {
1983                                 pi = adap2pinfo(adapter, i);
1984                                 nqsets = pi->first_qset + pi->nqsets;
1985                         }
1986                 }
1987
1988                 if (t.qset_idx >= nqsets)
1989                         return -EINVAL;
1990
1991                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
1992                 t.rspq_size = q->rspq_size;
1993                 t.txq_size[0] = q->txq_size[0];
1994                 t.txq_size[1] = q->txq_size[1];
1995                 t.txq_size[2] = q->txq_size[2];
1996                 t.fl_size[0] = q->fl_size;
1997                 t.fl_size[1] = q->jumbo_size;
1998                 t.polling = q->polling;
1999                 t.lro = q->lro;
2000                 t.intr_lat = q->coalesce_usecs;
2001                 t.cong_thres = q->cong_thres;
2002                 t.qnum = q1;
2003
2004                 if (adapter->flags & USING_MSIX)
2005                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2006                 else
2007                         t.vector = adapter->pdev->irq;
2008
2009                 if (copy_to_user(useraddr, &t, sizeof(t)))
2010                         return -EFAULT;
2011                 break;
2012         }
2013         case CHELSIO_SET_QSET_NUM:{
2014                 struct ch_reg edata;
2015                 unsigned int i, first_qset = 0, other_qsets = 0;
2016
2017                 if (!capable(CAP_NET_ADMIN))
2018                         return -EPERM;
2019                 if (adapter->flags & FULL_INIT_DONE)
2020                         return -EBUSY;
2021                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2022                         return -EFAULT;
2023                 if (edata.val < 1 ||
2024                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2025                         return -EINVAL;
2026
2027                 for_each_port(adapter, i)
2028                         if (adapter->port[i] && adapter->port[i] != dev)
2029                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2030
2031                 if (edata.val + other_qsets > SGE_QSETS)
2032                         return -EINVAL;
2033
2034                 pi->nqsets = edata.val;
2035
2036                 for_each_port(adapter, i)
2037                         if (adapter->port[i]) {
2038                                 pi = adap2pinfo(adapter, i);
2039                                 pi->first_qset = first_qset;
2040                                 first_qset += pi->nqsets;
2041                         }
2042                 break;
2043         }
2044         case CHELSIO_GET_QSET_NUM:{
2045                 struct ch_reg edata;
2046
2047                 edata.cmd = CHELSIO_GET_QSET_NUM;
2048                 edata.val = pi->nqsets;
2049                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2050                         return -EFAULT;
2051                 break;
2052         }
2053         case CHELSIO_LOAD_FW:{
2054                 u8 *fw_data;
2055                 struct ch_mem_range t;
2056
2057                 if (!capable(CAP_SYS_RAWIO))
2058                         return -EPERM;
2059                 if (copy_from_user(&t, useraddr, sizeof(t)))
2060                         return -EFAULT;
2061                 /* XXX: t.len is user-controlled and unbounded here; a sanity check would be prudent */
2062                 fw_data = kmalloc(t.len, GFP_KERNEL);
2063                 if (!fw_data)
2064                         return -ENOMEM;
2065
2066                 if (copy_from_user(fw_data, useraddr + sizeof(t),
2067                                    t.len)) {
2068                         kfree(fw_data);
2069                         return -EFAULT;
2070                 }
2071
2072                 ret = t3_load_fw(adapter, fw_data, t.len);
2073                 kfree(fw_data);
2074                 if (ret)
2075                         return ret;
2076                 break;
2077         }
2078         case CHELSIO_SETMTUTAB:{
2079                 struct ch_mtus m;
2080                 int i;
2081
2082                 if (!is_offload(adapter))
2083                         return -EOPNOTSUPP;
2084                 if (!capable(CAP_NET_ADMIN))
2085                         return -EPERM;
2086                 if (offload_running(adapter))
2087                         return -EBUSY;
2088                 if (copy_from_user(&m, useraddr, sizeof(m)))
2089                         return -EFAULT;
2090                 if (m.nmtus != NMTUS)
2091                         return -EINVAL;
2092                 if (m.mtus[0] < 81)     /* accommodate SACK */
2093                         return -EINVAL;
2094
2095                 /* MTUs must be in non-decreasing order */
2096                 for (i = 1; i < NMTUS; ++i)
2097                         if (m.mtus[i] < m.mtus[i - 1])
2098                                 return -EINVAL;
2099
2100                 memcpy(adapter->params.mtus, m.mtus,
2101                         sizeof(adapter->params.mtus));
2102                 break;
2103         }
2104         case CHELSIO_GET_PM:{
2105                 struct tp_params *p = &adapter->params.tp;
2106                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2107
2108                 if (!is_offload(adapter))
2109                         return -EOPNOTSUPP;
2110                 m.tx_pg_sz = p->tx_pg_size;
2111                 m.tx_num_pg = p->tx_num_pgs;
2112                 m.rx_pg_sz = p->rx_pg_size;
2113                 m.rx_num_pg = p->rx_num_pgs;
2114                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2115                 if (copy_to_user(useraddr, &m, sizeof(m)))
2116                         return -EFAULT;
2117                 break;
2118         }
2119         case CHELSIO_SET_PM:{
2120                 struct ch_pm m;
2121                 struct tp_params *p = &adapter->params.tp;
2122
2123                 if (!is_offload(adapter))
2124                         return -EOPNOTSUPP;
2125                 if (!capable(CAP_NET_ADMIN))
2126                         return -EPERM;
2127                 if (adapter->flags & FULL_INIT_DONE)
2128                         return -EBUSY;
2129                 if (copy_from_user(&m, useraddr, sizeof(m)))
2130                         return -EFAULT;
2131                 if (!is_power_of_2(m.rx_pg_sz) ||
2132                         !is_power_of_2(m.tx_pg_sz))
2133                         return -EINVAL; /* not power of 2 */
2134                 if (!(m.rx_pg_sz & 0x14000))
2135                         return -EINVAL; /* not 16KB or 64KB */
2136                 if (!(m.tx_pg_sz & 0x1554000))
2137                         return -EINVAL; /* not 16KB-16MB, power of 4 */
2138                 if (m.tx_num_pg == -1)
2139                         m.tx_num_pg = p->tx_num_pgs;
2140                 if (m.rx_num_pg == -1)
2141                         m.rx_num_pg = p->rx_num_pgs;
2142                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2143                         return -EINVAL; /* page counts must be multiples of 24 */
2144                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2145                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2146                         return -EINVAL;
2147                 p->rx_pg_size = m.rx_pg_sz;
2148                 p->tx_pg_size = m.tx_pg_sz;
2149                 p->rx_num_pgs = m.rx_num_pg;
2150                 p->tx_num_pgs = m.tx_num_pg;
2151                 break;
2152         }
2153         case CHELSIO_GET_MEM:{
2154                 struct ch_mem_range t;
2155                 struct mc7 *mem;
2156                 u64 buf[32];
2157
2158                 if (!is_offload(adapter))
2159                         return -EOPNOTSUPP;
2160                 if (!(adapter->flags & FULL_INIT_DONE))
2161                         return -EIO;    /* need the memory controllers */
2162                 if (copy_from_user(&t, useraddr, sizeof(t)))
2163                         return -EFAULT;
2164                 if ((t.addr & 7) || (t.len & 7))
2165                         return -EINVAL;
2166                 if (t.mem_id == MEM_CM)
2167                         mem = &adapter->cm;
2168                 else if (t.mem_id == MEM_PMRX)
2169                         mem = &adapter->pmrx;
2170                 else if (t.mem_id == MEM_PMTX)
2171                         mem = &adapter->pmtx;
2172                 else
2173                         return -EINVAL;
2174
2175                 /*
2176                  * Version scheme:
2177                  * bits 0..9: chip version
2178                  * bits 10..15: chip revision
2179                  */
2180                 t.version = 3 | (adapter->params.rev << 10);
2181                 if (copy_to_user(useraddr, &t, sizeof(t)))
2182                         return -EFAULT;
2183
2184                 /*
2185                  * Read 256 bytes at a time as len can be large and we don't
2186                  * want to use huge intermediate buffers.
2187                  */
2188                 useraddr += sizeof(t);  /* advance to start of buffer */
2189                 while (t.len) {
2190                         unsigned int chunk = min_t(unsigned int, t.len,
2191                                                    sizeof(buf));
2192
2193                         ret = t3_mc7_bd_read(mem, t.addr / 8,
2194                                              chunk / 8,
2195                                              buf);
2196                         if (ret)
2197                                 return ret;
2198                         if (copy_to_user(useraddr, buf, chunk))
2199                                 return -EFAULT;
2200                         useraddr += chunk;
2201                         t.addr += chunk;
2202                         t.len -= chunk;
2203                 }
2204                 break;
2205         }
2206         case CHELSIO_SET_TRACE_FILTER:{
2207                 struct ch_trace t;
2208                 const struct trace_params *tp;
2209
2210                 if (!capable(CAP_NET_ADMIN))
2211                         return -EPERM;
2212                 if (!offload_running(adapter))
2213                         return -EAGAIN;
2214                 if (copy_from_user(&t, useraddr, sizeof(t)))
2215                         return -EFAULT;
2216
2217                 tp = (const struct trace_params *)&t.sip;       /* ch_trace mirrors trace_params from sip onward */
2218                 if (t.config_tx)
2219                         t3_config_trace_filter(adapter, tp, 0,
2220                                                 t.invert_match,
2221                                                 t.trace_tx);
2222                 if (t.config_rx)
2223                         t3_config_trace_filter(adapter, tp, 1,
2224                                                 t.invert_match,
2225                                                 t.trace_rx);
2226                 break;
2227         }
2228         default:
2229                 return -EOPNOTSUPP;
2230         }
2231         return 0;
2232 }
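
/*
 * Example (illustrative only): how a userspace tool might issue one of the
 * extension commands above, here CHELSIO_GET_QSET_NUM.  The first u32 of
 * the payload must be the command code; SIOCCHIOCTL and struct ch_reg come
 * from cxgb3_ioctl.h, and "eth0" is an assumed interface name.
 *
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (void *)&edata;
 *	if (ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
 *		printf("%u queue sets\n", edata.val);
 */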
2233
2234 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2235 {
2236         struct mii_ioctl_data *data = if_mii(req);
2237         struct port_info *pi = netdev_priv(dev);
2238         struct adapter *adapter = pi->adapter;
2239         int ret, mmd;
2240
2241         switch (cmd) {
2242         case SIOCGMIIPHY:
2243                 data->phy_id = pi->phy.addr;
2244                 /* FALLTHRU */
2245         case SIOCGMIIREG:{
2246                 u32 val;
2247                 struct cphy *phy = &pi->phy;
2248
2249                 if (!phy->mdio_read)
2250                         return -EOPNOTSUPP;
2251                 if (is_10G(adapter)) {
2252                         mmd = data->phy_id >> 8;
2253                         if (!mmd)
2254                                 mmd = MDIO_DEV_PCS;
2255                         else if (mmd > MDIO_DEV_VEND2)
2256                                 return -EINVAL;
2257
2258                         ret = phy->mdio_read(adapter,
2259                                              data->phy_id & 0x1f,
2260                                              mmd, data->reg_num, &val);
2261                 } else
2262                         ret = phy->mdio_read(adapter,
2263                                              data->phy_id & 0x1f,
2264                                              0, data->reg_num & 0x1f,
2265                                              &val);
2266                 if (!ret)
2267                         data->val_out = val;
2268                 break;
2269         }
2270         case SIOCSMIIREG:{
2271                 struct cphy *phy = &pi->phy;
2272
2273                 if (!capable(CAP_NET_ADMIN))
2274                         return -EPERM;
2275                 if (!phy->mdio_write)
2276                         return -EOPNOTSUPP;
2277                 if (is_10G(adapter)) {
2278                         mmd = data->phy_id >> 8;
2279                         if (!mmd)
2280                                 mmd = MDIO_DEV_PCS;
2281                         else if (mmd > MDIO_DEV_VEND2)
2282                                 return -EINVAL;
2283
2284                         ret = phy->mdio_write(adapter,
2285                                               data->phy_id & 0x1f,
2286                                               mmd,
2287                                               data->reg_num,
2288                                               data->val_in);
2289                 } else
2290                         ret = phy->mdio_write(adapter,
2291                                               data->phy_id & 0x1f,
2292                                               0,
2293                                               data->reg_num & 0x1f,
2294                                               data->val_in);
2295                 break;
2296         }
2297         case SIOCCHIOCTL:
2298                 return cxgb_extension_ioctl(dev, req->ifr_data);
2299         default:
2300                 return -EOPNOTSUPP;
2301         }
2302         return ret;
2303 }
2304
2305 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2306 {
2307         struct port_info *pi = netdev_priv(dev);
2308         struct adapter *adapter = pi->adapter;
2309         int ret;
2310
2311         if (new_mtu < 81)       /* accommodate SACK */
2312                 return -EINVAL;
2313         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2314                 return ret;
2315         dev->mtu = new_mtu;
2316         init_port_mtus(adapter);
2317         if (adapter->params.rev == 0 && offload_running(adapter))
2318                 t3_load_mtus(adapter, adapter->params.mtus,
2319                              adapter->params.a_wnd, adapter->params.b_wnd,
2320                              adapter->port[0]->mtu);
2321         return 0;
2322 }
2323
2324 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2325 {
2326         struct port_info *pi = netdev_priv(dev);
2327         struct adapter *adapter = pi->adapter;
2328         struct sockaddr *addr = p;
2329
2330         if (!is_valid_ether_addr(addr->sa_data))
2331                 return -EINVAL;
2332
2333         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2334         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2335         if (offload_running(adapter))
2336                 write_smt_entry(adapter, pi->port_id);
2337         return 0;
2338 }
2339
2340 /**
2341  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2342  * @adap: the adapter
2343  * @p: the port
2344  *
2345  * Ensures that current Rx processing on any of the queues associated with
2346  * the given port completes before returning.  We do this by acquiring and
2347  * releasing each response queue's lock: any handler mid-pass holds it.
2348  */
2349 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2350 {
2351         int i;
2352
2353         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2354                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2355
2356                 spin_lock_irq(&q->lock);
2357                 spin_unlock_irq(&q->lock);
2358         }
2359 }
2360
2361 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2362 {
2363         struct port_info *pi = netdev_priv(dev);
2364         struct adapter *adapter = pi->adapter;
2365
2366         pi->vlan_grp = grp;
2367         if (adapter->params.rev > 0)
2368                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2369         else {
2370                 /* single control for all ports */
2371                 unsigned int i, have_vlans = 0;
2372                 for_each_port(adapter, i)
2373                     have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2374
2375                 t3_set_vlan_accel(adapter, 1, have_vlans);
2376         }
2377         t3_synchronize_rx(adapter, pi);
2378 }
2379
2380 #ifdef CONFIG_NET_POLL_CONTROLLER
2381 static void cxgb_netpoll(struct net_device *dev)
2382 {
2383         struct port_info *pi = netdev_priv(dev);
2384         struct adapter *adapter = pi->adapter;
2385         int qidx;
2386
2387         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2388                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2389                 void *source;
2390
2391                 if (adapter->flags & USING_MSIX)
2392                         source = qs;
2393                 else
2394                         source = adapter;
2395
2396                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2397         }
2398 }
2399 #endif
2400
2401 /*
2402  * Periodic accumulation of MAC statistics.
2403  */
2404 static void mac_stats_update(struct adapter *adapter)
2405 {
2406         int i;
2407
2408         for_each_port(adapter, i) {
2409                 struct net_device *dev = adapter->port[i];
2410                 struct port_info *p = netdev_priv(dev);
2411
2412                 if (netif_running(dev)) {
2413                         spin_lock(&adapter->stats_lock);
2414                         t3_mac_update_stats(&p->mac);
2415                         spin_unlock(&adapter->stats_lock);
2416                 }
2417         }
2418 }
2419
2420 static void check_link_status(struct adapter *adapter)
2421 {
2422         int i;
2423
2424         for_each_port(adapter, i) {
2425                 struct net_device *dev = adapter->port[i];
2426                 struct port_info *p = netdev_priv(dev);
2427
2428                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
2429                         t3_link_changed(adapter, i);
2430         }
2431 }
2432
2433 static void check_t3b2_mac(struct adapter *adapter)
2434 {
2435         int i;
2436
2437         if (!rtnl_trylock())    /* synchronize with ifdown */
2438                 return;
2439
2440         for_each_port(adapter, i) {
2441                 struct net_device *dev = adapter->port[i];
2442                 struct port_info *p = netdev_priv(dev);
2443                 int status;
2444
2445                 if (!netif_running(dev))
2446                         continue;
2447
2448                 status = 0;
2449                 if (netif_carrier_ok(dev))      /* netif_running() checked above */
2450                         status = t3b2_mac_watchdog_task(&p->mac);
2451                 if (status == 1)
2452                         p->mac.stats.num_toggled++;
2453                 else if (status == 2) {
2454                         struct cmac *mac = &p->mac;
2455
2456                         t3_mac_set_mtu(mac, dev->mtu);
2457                         t3_mac_set_address(mac, 0, dev->dev_addr);
2458                         cxgb_set_rxmode(dev);
2459                         t3_link_start(&p->phy, mac, &p->link_config);
2460                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2461                         t3_port_intr_enable(adapter, p->port_id);
2462                         p->mac.stats.num_resets++;
2463                 }
2464         }
2465         rtnl_unlock();
2466 }
2467
2468
2469 static void t3_adap_check_task(struct work_struct *work)
2470 {
2471         struct adapter *adapter = container_of(work, struct adapter,
2472                                                adap_check_task.work);
2473         const struct adapter_params *p = &adapter->params;
2474
2475         adapter->check_task_cnt++;
2476
2477         /* Check link status for PHYs without interrupts */
2478         if (p->linkpoll_period)
2479                 check_link_status(adapter);
2480
2481         /* Accumulate MAC stats if needed */
2482         if (!p->linkpoll_period ||
2483             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2484             p->stats_update_period) {
2485                 mac_stats_update(adapter);
2486                 adapter->check_task_cnt = 0;
2487         }
2488
2489         if (p->rev == T3_REV_B2)
2490                 check_t3b2_mac(adapter);
2491
2492         /* Schedule the next check update if any port is active. */
2493         spin_lock_irq(&adapter->work_lock);
2494         if (adapter->open_device_map & PORT_MASK)
2495                 schedule_chk_task(adapter);
2496         spin_unlock_irq(&adapter->work_lock);
2497 }
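
/*
 * Cadence example (assuming linkpoll_period is in tenths of a second, as
 * the division by 10 above suggests): with linkpoll_period = 5 and
 * stats_update_period = 10, the stats condition becomes true once
 * check_task_cnt * 5 / 10 >= 10, i.e. every 20th run, accumulating MAC
 * statistics roughly every 10 seconds.
 */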
2498
2499 /*
2500  * Processes external (PHY) interrupts in process context.
2501  */
2502 static void ext_intr_task(struct work_struct *work)
2503 {
2504         struct adapter *adapter = container_of(work, struct adapter,
2505                                                ext_intr_handler_task);
2506
2507         t3_phy_intr_handler(adapter);
2508
2509         /* Now reenable external interrupts */
2510         spin_lock_irq(&adapter->work_lock);
2511         if (adapter->slow_intr_mask) {
2512                 adapter->slow_intr_mask |= F_T3DBG;
2513                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2514                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2515                              adapter->slow_intr_mask);
2516         }
2517         spin_unlock_irq(&adapter->work_lock);
2518 }
2519
2520 /*
2521  * Interrupt-context handler for external (PHY) interrupts.
2522  */
2523 void t3_os_ext_intr_handler(struct adapter *adapter)
2524 {
2525         /*
2526          * Schedule a task to handle external interrupts as they may be slow
2527          * and we use a mutex to protect MDIO registers.  We disable PHY
2528          * interrupts in the meantime and let the task reenable them when
2529          * it's done.
2530          */
2531         spin_lock(&adapter->work_lock);
2532         if (adapter->slow_intr_mask) {
2533                 adapter->slow_intr_mask &= ~F_T3DBG;
2534                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2535                              adapter->slow_intr_mask);
2536                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2537         }
2538         spin_unlock(&adapter->work_lock);
2539 }
2540
2541 static int t3_adapter_error(struct adapter *adapter, int reset)
2542 {
2543         int i, ret = 0;
2544
2545         if (is_offload(adapter) &&
2546             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2547                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2548                 offload_close(&adapter->tdev);
2549         }
2550
2551         /* Stop all ports */
2552         for_each_port(adapter, i) {
2553                 struct net_device *netdev = adapter->port[i];
2554
2555                 if (netif_running(netdev))
2556                         cxgb_close(netdev);
2557         }
2558
2559         /* Stop SGE timers */
2560         t3_stop_sge_timers(adapter);
2561
2562         adapter->flags &= ~FULL_INIT_DONE;
2563
2564         if (reset)
2565                 ret = t3_reset_adapter(adapter);
2566
2567         pci_disable_device(adapter->pdev);
2568
2569         return ret;
2570 }
2571
2572 static int t3_reenable_adapter(struct adapter *adapter)
2573 {
2574         if (pci_enable_device(adapter->pdev)) {
2575                 dev_err(&adapter->pdev->dev,
2576                         "Cannot re-enable PCI device after reset.\n");
2577                 goto err;
2578         }
2579         pci_set_master(adapter->pdev);
2580         pci_restore_state(adapter->pdev);
2581
2582         /* Free sge resources */
2583         t3_free_sge_resources(adapter);
2584
2585         if (t3_replay_prep_adapter(adapter))
2586                 goto err;
2587
2588         return 0;
2589 err:
2590         return -1;
2591 }
2592
2593 static void t3_resume_ports(struct adapter *adapter)
2594 {
2595         int i;
2596
2597         /* Restart the ports */
2598         for_each_port(adapter, i) {
2599                 struct net_device *netdev = adapter->port[i];
2600
2601                 if (netif_running(netdev)) {
2602                         if (cxgb_open(netdev)) {
2603                                 dev_err(&adapter->pdev->dev,
2604                                         "can't bring device back up"
2605                                         " after reset\n");
2606                                 continue;
2607                         }
2608                 }
2609         }
2610
2611         if (is_offload(adapter) && !ofld_disable)
2612                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2613 }
2614
2615 /*
2616  * Process a fatal error:
2617  * bring the ports down, reset the chip, then bring the ports back up.
2618  */
2619 static void fatal_error_task(struct work_struct *work)
2620 {
2621         struct adapter *adapter = container_of(work, struct adapter,
2622                                                fatal_error_handler_task);
2623         int err = 0;
2624
2625         rtnl_lock();
2626         err = t3_adapter_error(adapter, 1);
2627         if (!err)
2628                 err = t3_reenable_adapter(adapter);
2629         if (!err)
2630                 t3_resume_ports(adapter);
2631
2632         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2633         rtnl_unlock();
2634 }
2635
2636 void t3_fatal_err(struct adapter *adapter)
2637 {
2638         unsigned int fw_status[4];
2639
2640         if (adapter->flags & FULL_INIT_DONE) {
2641                 t3_sge_stop(adapter);
2642                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2643                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2644                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2645                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2646
2647                 spin_lock(&adapter->work_lock);
2648                 t3_intr_disable(adapter);
2649                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2650                 spin_unlock(&adapter->work_lock);
2651         }
2652         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2653         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2654                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2655                          fw_status[0], fw_status[1],
2656                          fw_status[2], fw_status[3]);
2657
2658 }
2659
2660 /**
2661  * t3_io_error_detected - called when PCI error is detected
2662  * @pdev: Pointer to PCI device
2663  * @state: The current pci connection state
2664  *
2665  * This function is called after a PCI bus error affecting
2666  * this device has been detected.
2667  */
2668 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2669                                              pci_channel_state_t state)
2670 {
2671         struct adapter *adapter = pci_get_drvdata(pdev);
2672         int ret;
2673
2674         ret = t3_adapter_error(adapter, 0);
2675
2676         /* Request a slot reset. */
2677         return PCI_ERS_RESULT_NEED_RESET;
2678 }
2679
2680 /**
2681  * t3_io_slot_reset - called after the pci bus has been reset.
2682  * @pdev: Pointer to PCI device
2683  *
2684  * Restart the card from scratch, as if from a cold-boot.
2685  */
2686 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2687 {
2688         struct adapter *adapter = pci_get_drvdata(pdev);
2689
2690         if (!t3_reenable_adapter(adapter))
2691                 return PCI_ERS_RESULT_RECOVERED;
2692
2693         return PCI_ERS_RESULT_DISCONNECT;
2694 }
2695
2696 /**
2697  * t3_io_resume - called when traffic can start flowing again.
2698  * @pdev: Pointer to PCI device
2699  *
2700  * This callback is called when the error recovery driver tells us that
2701  * it's OK to resume normal operation.
2702  */
2703 static void t3_io_resume(struct pci_dev *pdev)
2704 {
2705         struct adapter *adapter = pci_get_drvdata(pdev);
2706
2707         t3_resume_ports(adapter);
2708 }
2709
2710 static struct pci_error_handlers t3_err_handler = {
2711         .error_detected = t3_io_error_detected,
2712         .slot_reset = t3_io_slot_reset,
2713         .resume = t3_io_resume,
2714 };
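
/*
 * These callbacks implement the kernel's standard PCI error recovery
 * sequence: error_detected() quiesces the device and requests a slot
 * reset, slot_reset() re-enables and re-initializes it, and resume()
 * restarts traffic on the ports.
 */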
2715
2716 /*
2717  * Set the number of qsets based on the number of CPUs and the number of ports,
2718  * not to exceed the number of available qsets, assuming there are enough qsets
2719  * per port in HW.
2720  */
2721 static void set_nqsets(struct adapter *adap)
2722 {
2723         int i, j = 0;
2724         int num_cpus = num_online_cpus();
2725         int hwports = adap->params.nports;
2726         int nqsets = adap->msix_nvectors - 1;
2727
2728         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2729                 if (hwports == 2 &&
2730                     (hwports * nqsets > SGE_QSETS ||
2731                      num_cpus >= nqsets / hwports))
2732                         nqsets /= hwports;
2733                 if (nqsets > num_cpus)
2734                         nqsets = num_cpus;
2735                 if (nqsets < 1 || hwports == 4)
2736                         nqsets = 1;
2737         } else
2738                 nqsets = 1;
2739
2740         for_each_port(adap, i) {
2741                 struct port_info *pi = adap2pinfo(adap, i);
2742
2743                 pi->first_qset = j;
2744                 pi->nqsets = nqsets;
2745                 j = pi->first_qset + nqsets;
2746
2747                 dev_info(&adap->pdev->dev,
2748                          "Port %d using %d queue sets.\n", i, nqsets);
2749         }
2750 }
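
/*
 * Worked example (assuming SGE_QSETS is 8): a two-port adapter granted
 * 9 MSI-X vectors starts from nqsets = 8; since 2 * 8 exceeds the 8
 * available qsets, nqsets drops to 4 per port, and it is further capped
 * at num_online_cpus() on smaller machines.  Four-port adapters and
 * INTx/MSI configurations always end up with one qset per port.
 */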
2751
2752 static int __devinit cxgb_enable_msix(struct adapter *adap)
2753 {
2754         struct msix_entry entries[SGE_QSETS + 1];
2755         int vectors;
2756         int i, err;
2757
2758         vectors = ARRAY_SIZE(entries);
2759         for (i = 0; i < vectors; ++i)
2760                 entries[i].entry = i;
2761
2762         while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2763                 vectors = err;
2764
2765         if (!err && vectors < (adap->params.nports + 1))
2766                 err = -1;
2767
2768         if (!err) {
2769                 for (i = 0; i < vectors; ++i)
2770                         adap->msix_info[i].vec = entries[i].vector;
2771                 adap->msix_nvectors = vectors;
2772         }
2773
2774         return err;
2775 }
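
/*
 * Note on the loop above: the legacy pci_enable_msix() API returns a
 * positive count when fewer vectors are available than requested, so the
 * driver retries with that count until it succeeds (0) or fails (< 0),
 * then insists on at least one vector per port plus one for slow events.
 */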
2776
2777 static void __devinit print_port_info(struct adapter *adap,
2778                                       const struct adapter_info *ai)
2779 {
2780         static const char *pci_variant[] = {
2781                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2782         };
2783
2784         int i;
2785         char buf[80];
2786
2787         if (is_pcie(adap))
2788                 snprintf(buf, sizeof(buf), "%s x%d",
2789                          pci_variant[adap->params.pci.variant],
2790                          adap->params.pci.width);
2791         else
2792                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2793                          pci_variant[adap->params.pci.variant],
2794                          adap->params.pci.speed, adap->params.pci.width);
2795
2796         for_each_port(adap, i) {
2797                 struct net_device *dev = adap->port[i];
2798                 const struct port_info *pi = netdev_priv(dev);
2799
2800                 if (!test_bit(i, &adap->registered_device_map))
2801                         continue;
2802                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2803                        dev->name, ai->desc, pi->phy.desc,
2804                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2805                        (adap->flags & USING_MSIX) ? " MSI-X" :
2806                        (adap->flags & USING_MSI) ? " MSI" : "");
2807                 if (adap->name == dev->name && adap->params.vpd.mclk)
2808                         printk(KERN_INFO
2809                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2810                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2811                                t3_mc7_size(&adap->pmtx) >> 20,
2812                                t3_mc7_size(&adap->pmrx) >> 20,
2813                                adap->params.vpd.sn);
2814         }
2815 }
2816
2817 static const struct net_device_ops cxgb_netdev_ops = {
2818         .ndo_open               = cxgb_open,
2819         .ndo_stop               = cxgb_close,
2820         .ndo_start_xmit         = t3_eth_xmit,
2821         .ndo_get_stats          = cxgb_get_stats,
2822         .ndo_validate_addr      = eth_validate_addr,
2823         .ndo_set_multicast_list = cxgb_set_rxmode,
2824         .ndo_do_ioctl           = cxgb_ioctl,
2825         .ndo_change_mtu         = cxgb_change_mtu,
2826         .ndo_set_mac_address    = cxgb_set_mac_addr,
2827         .ndo_vlan_rx_register   = vlan_rx_register,
2828 #ifdef CONFIG_NET_POLL_CONTROLLER
2829         .ndo_poll_controller    = cxgb_netpoll,
2830 #endif
2831 };
2832
2833 static int __devinit init_one(struct pci_dev *pdev,
2834                               const struct pci_device_id *ent)
2835 {
2836         static int version_printed;
2837
2838         int i, err, pci_using_dac = 0;
2839         unsigned long mmio_start, mmio_len;
2840         const struct adapter_info *ai;
2841         struct adapter *adapter = NULL;
2842         struct port_info *pi;
2843
2844         if (!version_printed) {
2845                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2846                 ++version_printed;
2847         }
2848
2849         if (!cxgb3_wq) {
2850                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2851                 if (!cxgb3_wq) {
2852                         printk(KERN_ERR DRV_NAME
2853                                ": cannot initialize work queue\n");
2854                         return -ENOMEM;
2855                 }
2856         }
2857
2858         err = pci_request_regions(pdev, DRV_NAME);
2859         if (err) {
2860                 /* Just info, some other driver may have claimed the device. */
2861                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2862                 return err;
2863         }
2864
2865         err = pci_enable_device(pdev);
2866         if (err) {
2867                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2868                 goto out_release_regions;
2869         }
2870
2871         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2872                 pci_using_dac = 1;
2873                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2874                 if (err) {
2875                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2876                                "coherent allocations\n");
2877                         goto out_disable_device;
2878                 }
2879         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2880                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2881                 goto out_disable_device;
2882         }
2883
2884         pci_set_master(pdev);
2885         pci_save_state(pdev);
2886
2887         mmio_start = pci_resource_start(pdev, 0);
2888         mmio_len = pci_resource_len(pdev, 0);
2889         ai = t3_get_adapter_info(ent->driver_data);
2890
2891         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2892         if (!adapter) {
2893                 err = -ENOMEM;
2894                 goto out_disable_device;
2895         }
2896
2897         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2898         if (!adapter->regs) {
2899                 dev_err(&pdev->dev, "cannot map device registers\n");
2900                 err = -ENOMEM;
2901                 goto out_free_adapter;
2902         }
2903
2904         adapter->pdev = pdev;
2905         adapter->name = pci_name(pdev);
2906         adapter->msg_enable = dflt_msg_enable;
2907         adapter->mmio_len = mmio_len;
2908
2909         mutex_init(&adapter->mdio_lock);
2910         spin_lock_init(&adapter->work_lock);
2911         spin_lock_init(&adapter->stats_lock);
2912
2913         INIT_LIST_HEAD(&adapter->adapter_list);
2914         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2915         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2916         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2917
        for (i = 0; i < ai->nports; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->rx_offload = T3_RX_CSUM | T3_LRO;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netif_tx_stop_all_queues(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
                netdev->features |= NETIF_F_LLTX;
                netdev->features |= NETIF_F_GRO;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                netdev->netdev_ops = &cxgb_netdev_ops;
                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }

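        /*
         * One-time adapter initialization; the last argument asks
         * t3_prep_adapter() to reset the chip.
         */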
        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration, we do not fail the whole card but proceed only with
         * the ports we manage to register successfully.  However, we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err) {
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                } else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }

        /* The driver is ready; reflect that on the LEDs. */
        t3_led_ready(adapter);

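        /*
         * If the adapter supports protocol offload, mark the offload
         * device registered and attach it to the cxgb3 offload framework.
         */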
        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /*
         * Pick the interrupt mechanism from the "msi" module parameter:
         * values above 1 try MSI-X first and then MSI, 1 tries MSI only,
         * and anything else leaves the device on legacy INTx interrupts.
         */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

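        /* Distribute the available SGE queue sets across the ports. */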
        set_nqsets(adapter);

        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);
        if (err)
                dev_warn(&pdev->dev, "cannot create sysfs attribute group\n");

        print_port_info(adapter, ai);
        return 0;

out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter:
        kfree(adapter);

out_disable_device:
        pci_disable_device(pdev);
out_release_regions:
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

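                /* Quiesce the SGE first so no DMA is in flight during teardown. */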
                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

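                /* Unregister only the netdevs that probe registered successfully. */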
                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);

                t3_stop_sge_timers(adapter);
                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = __devexit_p(remove_one),
        .err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
        cxgb3_offload_init();

        return pci_register_driver(&driver);
}

static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
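        /* The workqueue is created lazily at probe time and may not exist. */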
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);