 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "cxgb3_ioctl.h"
#include "cxgb3_offload.h"
#include "cxgb3_ctl_defs.h"
#include "firmware_exports.h"

        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
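/*
 * For reference, CH_DEVICE(0x20, 0) expands to
 *   { PCI_VENDOR_ID_CHELSIO, 0x20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }
 * i.e. the table matches on vendor/device ID only; the trailing idx is
 * driver-private data, presumably an index into an adapter-information
 * table consulted at probe time.
 */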
static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
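/*
 * Hypothetical load-time example combining the parameters above: restrict
 * the driver to MSI/INTx, disable offload, and enable all message types:
 *
 *   modprobe cxgb3 msi=1 ofld_disable=1 dflt_msg_enable=0xffff
 */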
 * We have work elements that we need to cancel when an interface is taken
 * down. Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch. If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete. Get our own work queue to solve this.

static struct workqueue_struct *cxgb3_wq;

 * link_report - show link status and link speed/duplex
 * @p: the port whose settings are to be reported
 * Shows the link status, speed, and duplex of a port.

static void link_report(struct net_device *dev)
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        const char *s = "10Mbps";
        const struct port_info *p = netdev_priv(dev);

        switch (p->link_config.speed) {
        printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
               p->link_config.duplex == DUPLEX_FULL ? "full" : "half");

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
        struct cmac *mac = &pi->mac;

        netif_carrier_on(dev);

        /* Clear local faults */
        t3_xgm_intr_disable(adap, pi->port_id);
        t3_read_reg(adap, A_XGM_INT_STATUS +
                    A_XGM_INT_CAUSE + pi->mac.offset,
        t3_set_reg_field(adap,
                         F_XGM_INT, F_XGM_INT);
        t3_xgm_intr_enable(adap, pi->port_id);
        t3_mac_enable(mac, MAC_DIRECTION_TX);
        netif_carrier_off(dev);

 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 * This is the OS-dependent handler for link status changes. The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.

void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))

        if (link_stat != netif_carrier_ok(dev)) {
                t3_mac_enable(mac, MAC_DIRECTION_RX);

                /* Clear local faults */
                t3_xgm_intr_disable(adapter, pi->port_id);
                t3_read_reg(adapter, A_XGM_INT_STATUS +
                t3_write_reg(adapter,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                t3_set_reg_field(adapter,
                                 A_XGM_INT_ENABLE + pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adapter, pi->port_id);
                netif_carrier_on(dev);
                netif_carrier_off(dev);
                t3_xgm_intr_disable(adapter, pi->port_id);
                t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                t3_set_reg_field(adapter,
                                 A_XGM_INT_ENABLE + pi->mac.offset,
                pi->phy.ops->power_down(&pi->phy, 1);
                t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                t3_mac_disable(mac, MAC_DIRECTION_RX);
                t3_link_start(&pi->phy, mac, &pi->link_config);

 * t3_os_phymod_changed - handle PHY module changes
 * @phy: the PHY reporting the module change
 * @mod_type: new module type
 * This is the OS-dependent handler for PHY module changes. It is
 * invoked when a PHY module is removed or inserted for any OS-specific

void t3_os_phymod_changed(struct adapter *adap, int port_id)
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);

static void cxgb_set_rxmode(struct net_device *dev)
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);

 * link_start - enable a port
 * @dev: the device to enable
 * Performs the MAC and PHY actions needed to enable a port.

static void link_start(struct net_device *dev)
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);

static inline void cxgb_disable_msi(struct adapter *adapter)
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;

 * Interrupt handler for asynchronous events used with MSI-X.

static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
        t3_slow_intr_handler(cookie);

 * Name the MSI-X interrupts.

static void name_msix_vecs(struct adapter *adap)
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;

static int request_msix_data_irqs(struct adapter *adap)
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                                free_irq(adap->msix_info[qidx + 1].vec,
                                         &adap->sge.qs[qidx]);

static void free_irq_resources(struct adapter *adapter)
        if (adapter->flags & USING_MSIX) {
                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
                free_irq(adapter->pdev->irq, adapter);

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {

static int init_tp_parity(struct adapter *adap)
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                        skb = adap->nofail_skb;
                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                        skb = adap->nofail_skb;
                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                        skb = adap->nofail_skb;
                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                skb = adap->nofail_skb;
        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);

        t3_tp_set_offload_mode(adap, 0);

        t3_tp_set_offload_mode(adap, 0);

 * setup_rss - configure RSS
 * Sets up RSS to distribute packets to multiple receive queues. We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.

static void setup_rss(struct adapter *adap)
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
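/*
 * Worked example of the mapping above: with nq0 = nq1 = 2, the lower half
 * of rspq_map alternates 0,1,0,1,... (port 0's qsets) and the upper half
 * alternates 2,3,2,3,... (port 1's qsets, offset by nq0).
 */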
static void init_napi(struct adapter *adap)
        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,

         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list. Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
        adap->flags |= NAPI_INIT;

 * Wait until all NAPI handlers are descheduled. This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra

static void quiesce_rx(struct adapter *adap)
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);

static void enable_all_napi(struct adapter *adap)
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);

 * set_qset_lro - Turn a queue set's LRO capability on and off
 * @dev: the device the qset is attached to
 * @qset_idx: the queue set index
 * @val: the LRO switch
 * Sets LRO on or off for a particular queue set.
 * The device's features flag is updated to reflect the LRO
 * capability when all queues belonging to the device are

static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;

 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.

static int setup_sge_qsets(struct adapter *adap)
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
                        set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                                t3_free_sge_resources(adap);

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t (*format)(struct net_device *, char *))
        /* Synchronize with ioctls that may shut down the device */
        len = (*format)(to_net_dev(d), buf);

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
        if (!capable(CAP_NET_ADMIN))

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)

        ret = (*set)(to_net_dev(d), val);

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
        return attr_show(d, buf, format_##name); \

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
        if (val && adap->params.rev == 0)
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
        adap->params.mc5.nfilters = val;

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
        return attr_store(d, buf, len, set_nfilters, 0, ~0);

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
        adap->params.mc5.nservers = val;

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
        return attr_store(d, buf, len, set_nservers, 0, ~0);

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };
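/*
 * Illustrative usage (the exact sysfs path is an assumption): the group is
 * attached to the port's device object, e.g. /sys/class/net/eth0/, so the
 * MC5 TCAM partitioning can be inspected and, while the adapter is not yet
 * fully initialized, resized:
 *
 *   cat /sys/class/net/eth0/cam_size
 *   echo 8192 > /sys/class/net/eth0/nfilters
 */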
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        bpt = (v >> 8) & 0xff;
                len = sprintf(buf, "disabled\n");
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
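/*
 * Unit check for the computation above, assuming vpd.cclk is in kHz: the
 * scheduler sends bpt bytes every cpt core-clock ticks, so cclk * 1000 / cpt
 * is scheduling intervals per second, v * bpt is bytes per second, and
 * dividing by 125 converts bytes/s to Kbps (125 bytes/s = 1000 bits/s).
 */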
static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;

        if (!capable(CAP_NET_ADMIN))

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)

        ret = t3_config_sched(adap, val, sched);

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
        return tm_attr_show(d, buf, sched); \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
        return tm_attr_store(d, buf, len, sched); \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };
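/*
 * Hypothetical usage, assuming the attributes appear under the offload
 * port's net device directory (values presumably in Kbps, 0..10000000,
 * matching tm_attr_show and tm_attr_store above):
 *
 *   echo 100000 > /sys/class/net/eth0/sched0
 *   cat /sys/class/net/eth0/sched0
 */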
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.

static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
        ret = t3_offload_tx(tdev, skb);

static int write_smt_entry(struct adapter *adapter, int idx)
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        offload_tx(&adapter->tdev, skb);

static int init_smt(struct adapter *adapter)
        for_each_port(adapter, i)
                write_smt_entry(adapter, i);

static void init_port_mtus(struct adapter *adapter)
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                skb = adap->nofail_skb;
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                if (!adap->nofail_skb)

static int bind_qsets(struct adapter *adap)
        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"

static inline const char *get_edc_fw_name(int edc_idx)
        const char *fw_name = NULL;

        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        u16 *cache = phy->phy_cache;

        snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx));

        ret = request_firmware(&fw, buf, &adapter->pdev->dev);
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",

        /* check size, taking the checksum into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",

        for (i = 0; i < size / 4; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;

        release_firmware(fw);

static int upgrade_fw(struct adapter *adap)
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

static inline char t3rev2char(struct adapter *adapter)
        switch (adapter->params.rev) {

static int update_tpsram(struct adapter *adap)
        const struct firmware *tpsram;
        struct device *dev = &adap->pdev->dev;

        rev = t3rev2char(adap);

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
                         "successful update of protocol engine "
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
                dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
                dev_err(dev, "loading protocol SRAM failed\n");

        release_firmware(tpsram);

 * cxgb_up - enable the adapter
 * @adapter: adapter being enabled
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 * Must be called with the rtnl lock held.

static int cxgb_up(struct adapter *adap)
        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");

                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);

                if (!(adap->flags & NAPI_INIT))
                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);

                err = request_msix_data_irqs(adap);
                        free_irq(adap->msix_info[0].vec, adap);
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                      (adap->flags & USING_MSI) ?

        enable_all_napi(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);

        if (!(adap->flags & QUEUES_BOUND)) {
                err = bind_qsets(adap);
                        CH_ERR(adap, "failed to bind qsets, err %d\n", err);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                adap->flags |= QUEUES_BOUND;

        CH_ERR(adap, "request_irq failed, err %d\n", err);

 * Release resources when all the ports and offloading have been stopped.

static void cxgb_down(struct adapter *adapter)
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */

static void schedule_chk_task(struct adapter *adap)
        timeo = adap->params.linkpoll_period ?
                (HZ * adap->params.linkpoll_period) / 10 :
                adap->params.stats_update_period * HZ;
        queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);

static int offload_open(struct net_device *dev)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))

        if (!adap_up && (err = cxgb_up(adapter)) < 0)

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

        /* restore them in case the offload module has changed them */
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
        cxgb3_set_dummy_ops(tdev);

static int offload_close(struct t3cdev *tdev)
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_scheduled_work();

        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb3_offload_deactivate(adapter);

static int cxgb_open(struct net_device *dev)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                               "Could not initialize offload capabilities\n");

        dev->real_num_tx_queues = pi->nqsets;
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
                schedule_chk_task(adapter);

static int cxgb_close(struct net_device *dev)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
                pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
                pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;

static u32 get_msglevel(struct net_device *dev)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;

static void set_msglevel(struct net_device *dev, u32 val)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",
        "CheckTXEnToggled   ",

static int get_sset_count(struct net_device *dev, int sset)
                return ARRAY_SIZE(stats_strings);

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
        return T3_REGMAP_SIZE;

static int get_eeprom_len(struct net_device *dev)

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
                strcpy(info->fw_version, "N/A");
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
        unsigned long tot = 0;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
                tot += adapter->sge.qs[i].port_stats[idx];

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;

        *data++ = s->link_faults;

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation. Hard to justify the complexity.
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));

static int restart_autoneg(struct net_device *dev)
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
        if (p->link_config.autoneg != AUTONEG_ENABLE)
        p->phy.ops->autoneg_restart(&p->phy);

static int cxgb3_phys_id(struct net_device *dev, u32 data)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))

        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.mdio.prtad;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;

static int speed_duplex_to_caps(int speed, int duplex)
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                        cap = SUPPORTED_10baseT_Half;
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                        cap = SUPPORTED_100baseT_Half;
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                        cap = SUPPORTED_1000baseT_Half;
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                         ADVERTISED_10000baseT_Full)
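/*
 * Illustrative ethtool interaction (device name hypothetical): forcing a
 * speed/duplex pair is accepted only if the PHY supports it, and
 * set_settings() below additionally refuses a forced 1Gb/s:
 *
 *   ethtool -s eth0 autoneg off speed 100 duplex full
 *   ethtool -s eth0 autoneg on
 */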
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg)) {
                 * PHY offers a single speed/duplex. See if that's what's
                if (cmd->autoneg == AUTONEG_DISABLE) {
                        int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                        if (lc->supported & cap)

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;

        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);

static u32 get_rx_csum(struct net_device *dev)
        struct port_info *p = netdev_priv(dev);

        return p->rx_offload & T3_RX_CSUM;

static int set_rx_csum(struct net_device *dev, u32 data)
        struct port_info *p = netdev_priv(dev);

                p->rx_offload |= T3_RX_CSUM;
                p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
                for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
                        set_qset_lro(dev, i, 0);

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = q->fl_size;
        e->rx_mini_pending = q->rspq_size;
        e->rx_jumbo_pending = q->jumbo_size;
        e->tx_pending = q->txq_size[0];

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q;

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)

        if (adapter->flags & FULL_INIT_DONE)

        q = &adapter->params.sge.qset[pi->first_qset];
        for (i = 0; i < pi->nqsets; ++i, ++q) {
                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);

                memcpy(data, buf + e->offset, e->len);

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 aligned_offset, aligned_len;

        if (eeprom->magic != EEPROM_MAGIC)

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
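/*
 * Worked example of the alignment arithmetic above: offset = 5, len = 6
 * gives aligned_offset = 4 and aligned_len = (6 + 1 + 3) & ~3 = 8, i.e.
 * the 4-byte-aligned window [4, 12) fully covers the requested bytes
 * [5, 11).
 */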
        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                buf = kmalloc(aligned_len, GFP_KERNEL);
                err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (__le32 *)&buf[aligned_len - 4]);
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);

        err = t3_seeprom_wp(adapter, 0);

        for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = t3_seeprom_write(adapter, aligned_offset, *p);
                aligned_offset += 4;

                err = t3_seeprom_wp(adapter, 1);

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        memset(&wol->sopass, 0, sizeof(wol->sopass));

static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings = get_settings,
        .set_settings = set_settings,
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .set_eeprom = set_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_rx_csum = get_rx_csum,
        .set_rx_csum = set_rx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .phys_id = cxgb3_phys_id,
        .nway_reset = restart_autoneg,
        .get_sset_count = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .set_tso = ethtool_op_set_tso,
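/*
 * In the extension ioctl below, negative parameter values mean "leave
 * unchanged", so in_range() deliberately accepts any negative value.
 */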
static int in_range(int val, int lo, int hi)
        return val < 0 || (val <= hi && val >= lo);

static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))

        case CHELSIO_SET_QSET_PARAMS:{
                struct qset_params *q;
                struct ch_qset_params t;
                int q1 = pi->first_qset;
                int nqsets = pi->nqsets;

                if (!capable(CAP_NET_ADMIN))
                if (copy_from_user(&t, useraddr, sizeof(t)))
                if (t.qset_idx >= SGE_QSETS)
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                    !in_range(t.cong_thres, 0, 255) ||
                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                              MAX_CTRL_TXQ_ENTRIES) ||
                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
                    || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                                 MAX_RX_JUMBO_BUFFERS)
                    || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,

                if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
                        for_each_port(adapter, i) {
                                pi = adap2pinfo(adapter, i);
                                if (t.qset_idx >= pi->first_qset &&
                                    t.qset_idx < pi->first_qset + pi->nqsets &&
                                    !(pi->rx_offload & T3_RX_CSUM))

                if ((adapter->flags & FULL_INIT_DONE) &&
                    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
                     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
                     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
                     t.polling >= 0 || t.cong_thres >= 0))

                /* Allow setting of any available qset when offload enabled */
                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                        for_each_port(adapter, i) {
                                pi = adap2pinfo(adapter, i);
                                nqsets += pi->first_qset + pi->nqsets;

                if (t.qset_idx < q1)
                if (t.qset_idx > q1 + nqsets - 1)

                q = &adapter->params.sge.qset[t.qset_idx];

                if (t.rspq_size >= 0)
                        q->rspq_size = t.rspq_size;
                if (t.fl_size[0] >= 0)
                        q->fl_size = t.fl_size[0];
                if (t.fl_size[1] >= 0)
                        q->jumbo_size = t.fl_size[1];
                if (t.txq_size[0] >= 0)
                        q->txq_size[0] = t.txq_size[0];
                if (t.txq_size[1] >= 0)
                        q->txq_size[1] = t.txq_size[1];
                if (t.txq_size[2] >= 0)
                        q->txq_size[2] = t.txq_size[2];
                if (t.cong_thres >= 0)
                        q->cong_thres = t.cong_thres;
                if (t.intr_lat >= 0) {
                        struct sge_qset *qs =
                                &adapter->sge.qs[t.qset_idx];

                        q->coalesce_usecs = t.intr_lat;
                        t3_update_qset_coalesce(qs, q);
                if (t.polling >= 0) {
                        if (adapter->flags & USING_MSIX)
                                q->polling = t.polling;
                                /* No polling with INTx for T3A */
                                if (adapter->params.rev == 0 &&
                                    !(adapter->flags & USING_MSI))
                                for (i = 0; i < SGE_QSETS; i++) {
                                        q = &adapter->params.sge.
                                        q->polling = t.polling;
                        set_qset_lro(dev, t.qset_idx, t.lro);

        case CHELSIO_GET_QSET_PARAMS:{
                struct qset_params *q;
                struct ch_qset_params t;
                int q1 = pi->first_qset;
                int nqsets = pi->nqsets;

                if (copy_from_user(&t, useraddr, sizeof(t)))

                /* Display qsets for all ports when offload enabled */
                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                        for_each_port(adapter, i) {
                                pi = adap2pinfo(adapter, i);
                                nqsets = pi->first_qset + pi->nqsets;

                if (t.qset_idx >= nqsets)

                q = &adapter->params.sge.qset[q1 + t.qset_idx];
                t.rspq_size = q->rspq_size;
                t.txq_size[0] = q->txq_size[0];
                t.txq_size[1] = q->txq_size[1];
                t.txq_size[2] = q->txq_size[2];
                t.fl_size[0] = q->fl_size;
                t.fl_size[1] = q->jumbo_size;
                t.polling = q->polling;
                t.intr_lat = q->coalesce_usecs;
                t.cong_thres = q->cong_thres;

                if (adapter->flags & USING_MSIX)
                        t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
                        t.vector = adapter->pdev->irq;

                if (copy_to_user(useraddr, &t, sizeof(t)))

        case CHELSIO_SET_QSET_NUM:{
                struct ch_reg edata;
                unsigned int i, first_qset = 0, other_qsets = 0;

                if (!capable(CAP_NET_ADMIN))
                if (adapter->flags & FULL_INIT_DONE)
                if (copy_from_user(&edata, useraddr, sizeof(edata)))
                if (edata.val < 1 ||
                    (edata.val > 1 && !(adapter->flags & USING_MSIX)))

                for_each_port(adapter, i)
                        if (adapter->port[i] && adapter->port[i] != dev)
                                other_qsets += adap2pinfo(adapter, i)->nqsets;

                if (edata.val + other_qsets > SGE_QSETS)

                pi->nqsets = edata.val;

                for_each_port(adapter, i)
                        if (adapter->port[i]) {
                                pi = adap2pinfo(adapter, i);
                                pi->first_qset = first_qset;
                                first_qset += pi->nqsets;

        case CHELSIO_GET_QSET_NUM:{
                struct ch_reg edata;

                edata.cmd = CHELSIO_GET_QSET_NUM;
                edata.val = pi->nqsets;
                if (copy_to_user(useraddr, &edata, sizeof(edata)))

        case CHELSIO_LOAD_FW:{
                struct ch_mem_range t;

                if (!capable(CAP_SYS_RAWIO))
                if (copy_from_user(&t, useraddr, sizeof(t)))
                /* Check t.len sanity ? */
                fw_data = kmalloc(t.len, GFP_KERNEL);
                    (fw_data, useraddr + sizeof(t), t.len)) {

                ret = t3_load_fw(adapter, fw_data, t.len);

        case CHELSIO_SETMTUTAB:{
                if (!is_offload(adapter))
                if (!capable(CAP_NET_ADMIN))
                if (offload_running(adapter))
                if (copy_from_user(&m, useraddr, sizeof(m)))
                if (m.nmtus != NMTUS)
                if (m.mtus[0] < 81)     /* accommodate SACK */

                /* MTUs must be in ascending order */
                for (i = 1; i < NMTUS; ++i)
                        if (m.mtus[i] < m.mtus[i - 1])

                memcpy(adapter->params.mtus, m.mtus,
                       sizeof(adapter->params.mtus));

        case CHELSIO_GET_PM:{
                struct tp_params *p = &adapter->params.tp;
                struct ch_pm m = { .cmd = CHELSIO_GET_PM };

                if (!is_offload(adapter))
                m.tx_pg_sz = p->tx_pg_size;
                m.tx_num_pg = p->tx_num_pgs;
                m.rx_pg_sz = p->rx_pg_size;
                m.rx_num_pg = p->rx_num_pgs;
                m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
                if (copy_to_user(useraddr, &m, sizeof(m)))

        case CHELSIO_SET_PM:{
                struct tp_params *p = &adapter->params.tp;

                if (!is_offload(adapter))
                if (!capable(CAP_NET_ADMIN))
                if (adapter->flags & FULL_INIT_DONE)
                if (copy_from_user(&m, useraddr, sizeof(m)))
                if (!is_power_of_2(m.rx_pg_sz) ||
                    !is_power_of_2(m.tx_pg_sz))
                        return -EINVAL; /* not power of 2 */
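/*
 * Decoding the magic masks below: 0x14000 has bits 14 and 16 set, i.e.
 * only 16KB or 64KB Rx page sizes pass, and 0x1554000 has bits 14, 16,
 * 18, 20, 22 and 24 set, i.e. Tx page sizes of 16KB, 64KB, 256KB, 1MB,
 * 4MB or 16MB (power-of-2 membership was already checked above).
 */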
2335 if (!(m.rx_pg_sz & 0x14000))
2336 return -EINVAL; /* not 16KB or 64KB */
2337 if (!(m.tx_pg_sz & 0x1554000))
2339 if (m.tx_num_pg == -1)
2340 m.tx_num_pg = p->tx_num_pgs;
2341 if (m.rx_num_pg == -1)
2342 m.rx_num_pg = p->rx_num_pgs;
2343 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2345 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2346 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2348 p->rx_pg_size = m.rx_pg_sz;
2349 p->tx_pg_size = m.tx_pg_sz;
2350 p->rx_num_pgs = m.rx_num_pg;
2351 p->tx_num_pgs = m.tx_num_pg;
2354 case CHELSIO_GET_MEM:{
2355 struct ch_mem_range t;
2359 if (!is_offload(adapter))
2361 if (!(adapter->flags & FULL_INIT_DONE))
2362 return -EIO; /* need the memory controllers */
2363 if (copy_from_user(&t, useraddr, sizeof(t)))
2365 if ((t.addr & 7) || (t.len & 7))
2367 if (t.mem_id == MEM_CM)
2369 else if (t.mem_id == MEM_PMRX)
2370 mem = &adapter->pmrx;
2371 else if (t.mem_id == MEM_PMTX)
2372 mem = &adapter->pmtx;
2378 * bits 0..9: chip version
2379 * bits 10..15: chip revision
2381 t.version = 3 | (adapter->params.rev << 10);
2382 if (copy_to_user(useraddr, &t, sizeof(t)))
2386 * Read 256 bytes at a time as len can be large and we don't
2387 * want to use huge intermediate buffers.
2389 useraddr += sizeof(t); /* advance to start of buffer */
2391 unsigned int chunk =
2392 min_t(unsigned int, t.len, sizeof(buf));
2395 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2399 if (copy_to_user(useraddr, buf, chunk))
2407 case CHELSIO_SET_TRACE_FILTER:{
2409 const struct trace_params *tp;
2411 if (!capable(CAP_NET_ADMIN))
2413 if (!offload_running(adapter))
2415 if (copy_from_user(&t, useraddr, sizeof(t)))
2418 tp = (const struct trace_params *)&t.sip;
2420 t3_config_trace_filter(adapter, tp, 0,
2424 t3_config_trace_filter(adapter, tp, 1,
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
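/*
 * MTU changes are validated against the 81-byte floor (room for a TCP
 * header carrying a SACK option, per the check below) and pushed to the
 * MAC.  On rev-0 adapters running offload, the hardware MTU table is
 * reloaded so the new value is visible to offloaded connections as well.
 */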
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
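/*
 * Sets a port's MAC address, programs it into the MAC's exact-match
 * filter and, when offload is running, refreshes the port's SMT entry
 * so offloaded traffic uses the new source MAC as well.
 */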
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
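/*
 * Rev-0 adapters expose a single VLAN-acceleration control shared by all
 * ports, so acceleration is left on while any port still has a VLAN group
 * registered; later revisions can be toggled per port.
 */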
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
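/*
 * Poll the link state of every port.  A link fault flagged by the
 * interrupt path is serviced first; PHYs that cannot raise a link-change
 * interrupt (no SUPPORTED_IRQ capability) are polled explicitly.
 */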
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}
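/*
 * Watchdog for T3B2 MACs.  As used below, a watchdog status of 1 means
 * the MAC was toggled and only a counter is bumped, while 2 means the
 * MAC had to be reset and must be fully reprogrammed (MTU, address,
 * Rx mode, link) before being re-enabled.
 */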
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
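/*
 * Periodic housekeeping: polls link state, accumulates MAC statistics,
 * runs the T3B2 watchdog and samples conditions (Rx FIFO overflows,
 * empty free lists) that are counted by polling rather than serviced
 * per interrupt.  Reschedules itself while any port remains open.
 */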
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMAC's to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
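/*
 * Interrupt-context hook for link faults: just records the fault under
 * work_lock; the fault is serviced later, in process context, by
 * check_link_status().
 */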
void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}
/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	t3_resume_ports(adapter);
}
static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};
/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}
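/*
 * Request one MSI-X vector per queue set plus one for the slow path.
 * With the classic pci_enable_msix() API a positive return value is the
 * number of vectors actually available, so keep retrying with that count;
 * ending up with fewer than nports + 1 vectors is treated as failure.
 */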
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i, err;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0)
		pci_disable_msix(adap->pdev);

	if (!err && vectors < (adap->params.nports + 1)) {
		pci_disable_msix(adap->pdev);
		err = -1;
	}

	if (!err) {
		for (i = 0; i < vectors; ++i)
			adap->msix_info[i].vec = entries[i].vector;
		adap->msix_nvectors = vectors;
	}

	return err;
}
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};
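/*
 * PCI probe: maps the device, allocates the adapter and one net device
 * per port, then registers the ports.  Per the comment further down,
 * individual registration failures are tolerated as long as at least
 * one net device registers successfully.
 */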
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_GRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		err = -ENODEV;
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
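/*
 * PCI remove: tears down in roughly the reverse order of init_one(),
 * unregistering the net devices and releasing SGE, MSI and PCI resources.
 */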
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};
static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);