/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};
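
/*
 * The second CH_DEVICE() argument lands in the entry's driver_data field;
 * the probe path (outside this excerpt) is expected to use it as an index
 * selecting the board-specific adapter information for that device ID, which
 * is why several device IDs (e.g. 0x21 and 0x24) share the same index.
 */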
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
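
/*
 * Illustrative usage (not part of the driver): to forbid MSI-X at load time,
 * pass the parameter on the modprobe command line:
 *
 *	modprobe cxgb3 msi=1
 *
 * The 0644 permissions above also expose the value through
 * /sys/module/cxgb3/parameters/msi, writable by root.
 */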
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
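
/*
 * A minimal sketch (assumed to live in the module init/exit paths, outside
 * this excerpt) of how such a private workqueue is typically created and
 * used in place of keventd:
 *
 *	cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *	if (!cxgb3_wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(cxgb3_wq, &some_work);   (instead of schedule_work())
 *	...
 *	destroy_workqueue(cxgb3_wq);
 */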
/**
 * link_report - show link status and link speed/duplex
 * @dev: the network device whose settings are to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
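
/*
 * For a 10G port with an established link, the message above would read,
 * for example:
 *
 *	eth0: link up, 10Gbps, full-duplex
 */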
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE + pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}
/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes.  The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}
/**
 * t3_os_phymod_changed - handle PHY module changes
 * @adap: the adapter associated with the module change
 * @port_id: the port index whose PHY module changed
 *
 * This is the OS-dependent handler for PHY module changes.  It is
 * invoked when a PHY module is removed or inserted for any OS-specific
 * processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
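
/*
 * Illustration: with two ports of two queue sets each, the names generated
 * above would appear in /proc/interrupts roughly as
 *
 *	eth0		(slow path / async events, msix_info[0])
 *	eth0-0, eth0-1	(data queue sets of port 0)
 *	eth1-2, eth1-3	(data queue sets of port 1, first_qset = 2)
 */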
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}
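
/*
 * Reply accounting above: the 16 SMT writes, 2048 L2T writes, 2048 RTE
 * writes and the final TCB write each produce one CPL reply on queue set 0,
 * hence the running totals of 16, 16 + 2048, 16 + 2048 + 2048 and finally
 * 16 + 2048 + 2048 + 1 = 4113 replies awaited.
 */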
/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
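
/*
 * Worked example: with nq0 = nq1 = 2 the loop above fills the first half of
 * rspq_map with 0,1,0,1,... (port 0's queue sets) and the second half with
 * 2,3,2,3,... (port 1's queue sets offset by nq0), so RSS hash buckets
 * alternate between the response queues configured for each port.
 */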
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 * set_qset_lro - Turn a queue set's LRO capability on and off
 * @dev: the device the qset is attached to
 * @qset_idx: the queue set index
 * @val: the LRO switch
 *
 * Sets LRO on or off for a particular queue set.
 * The device's features flag is updated to reflect the LRO
 * capability when all queues belonging to the device are
 * in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}
/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
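
/*
 * Illustration: CXGB3_SHOW(cam_size, t3_mc5_size(&adap->mc5)) expands to a
 * format_cam_size() helper that prints the value with "%u\n" and a
 * show_cam_size() sysfs handler that runs it under the rtnl lock via
 * attr_show(); the CXGB3_ATTR_* wrappers below then bind such handlers to a
 * DEVICE_ATTR.
 */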
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
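
/*
 * Units in the computation above: cclk is the core clock in kHz and cpt is
 * core clocks per scheduler tick, so cclk * 1000 / cpt is ticks per second;
 * multiplying by bpt (bytes per tick) gives bytes per second, and dividing
 * by 125 converts bytes/s to Kbps (x8 bits, /1000).
 */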
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
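
/*
 * The per-port MTUs are packed into one 32-bit register: port 0 in the low
 * 16 bits, port 1 in the high 16 bits.  E.g. mtu0 = 1500 (0x5dc) and
 * mtu1 = 9000 (0x2328) yield the register value 0x232805dc.
 */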
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
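
/*
 * With the format strings above, the names resolve to files such as
 * "cxgb3/t3fw-<major>.<minor>.<micro>.bin" for the main firmware and
 * "cxgb3/t3b_psram-<major>.<minor>.<micro>.bin" (or the t3c variant) for
 * the protocol SRAM, loaded via the request_firmware() interface below.
 */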
static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	char buf[64];
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret;

	snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx));

	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}

	/* check size, take checksum in account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4 ; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}
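
/*
 * The image is accepted when the 32-bit sum of all big-endian words equals
 * 0xffffffff.  A sketch (illustrative, not part of the driver) of how a
 * build tool could append the matching trailing checksum word:
 *
 *	u32 sum = 0;
 *	for (i = 0; i < nwords; i++)
 *		sum += word[i];
 *	trailer = 0xffffffff - sum;   (making the total wrap to 0xffffffff)
 */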
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
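
/*
 * linkpoll_period is in tenths of a second, hence the HZ * period / 10
 * conversion above: e.g. a period of 5 schedules the check task every half
 * second, while a zero period falls back to the statistics update interval
 * (in whole seconds).
 */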
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_scheduled_work();

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	dev->real_num_tx_queues = pi->nqsets;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

	"LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	/* the three Lro* slots are no longer maintained and stay zero */
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}
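
/*
 * Note that the destination pointer is offset by 'start', so each register
 * value lands at the buffer offset equal to its register address; the blocks
 * dumped below therefore stay position-aligned within the T3_REGMAP_SIZE map
 * even though the skipped ranges are left zeroed.
 */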
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
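
/*
 * Usage sketch: speed_duplex_to_caps(SPEED_1000, DUPLEX_FULL) returns
 * SUPPORTED_1000baseT_Full, while an unsupported combination such as
 * 10 Gbps half-duplex falls through and returns 0.
 */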
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & T3_RX_CSUM;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data) {
		p->rx_offload |= T3_RX_CSUM;
	} else {
		int i;

		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			set_qset_lro(dev, i, 0);
	}
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};

static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
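
/*
 * Note the asymmetry: a negative value means "parameter not supplied" in the
 * ioctl structures below and always passes, e.g. in_range(-1, 4, 1024) is
 * true while in_range(2, 4, 1024) is false.
 */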
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !(pi->rx_offload & T3_RX_CSUM))
					return -EINVAL;
			}

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}
		if (t.lro >= 0)
			set_qset_lro(dev, t.qset_idx, t.lro);

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
		    (fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;	/* not 16KB to 4MB, powers of 4 */
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
					     buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
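		/*
		 * The legacy packing keeps PRTAD in bits 8..12 and DEVAD in
		 * bits 0..4, so e.g. phy_id 0x0105 (PRTAD 1, DEVAD 5) is
		 * rewritten to mdio_phy_id_c45(1, 5) below.
		 */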
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
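/*
 * This works as a barrier because the SGE Rx handlers hold the response
 * queue lock while they run: once each queue's lock has been taken and
 * dropped, any Rx processing that was in flight on entry has finished.
 */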
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	/* Flush in-flight Rx so nothing still references the old vlan_grp */
	t3_synchronize_rx(adapter, pi);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;
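		/*
		 * Under MSI-X each queue set owns its vector and its handler
		 * takes the qset as dev_id; with MSI/INTx there is a single
		 * handler whose dev_id is the adapter.
		 */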
		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)	/* the watchdog toggled the MAC */
			p->mac.stats.num_toggled++;
		else if (status == 2) {	/* the MAC needs a full reinit */
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMACs to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}
	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;
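	/*
	 * A_SG_RSPQ_FL_STATUS packs two empty bits per queue set (free
	 * list 0, then free list 1), so the walk below alternates between
	 * the two free lists and advances to the next qset every other bit.
	 */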
	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}
/*
 * Processes a fatal error:
 * bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
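		/*
		 * Zeroing the Tx/Rx control registers of both XGMAC
		 * instances quiesces the MACs before the reset work is
		 * queued, so no new traffic hits the wedged datapath.
		 */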
		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};
/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;
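	/*
	 * One MSI-X vector is reserved for slow-path (async/error) events,
	 * so only msix_nvectors - 1 vectors are available to drive queue
	 * sets; they are then divided among the ports.
	 */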
	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i, err;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;
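	/*
	 * With the old pci_enable_msix() API a positive return value is the
	 * number of vectors that could have been allocated, so retry with
	 * that smaller count until the call succeeds (0) or fails (< 0).
	 */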
	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0)
		pci_disable_msix(adap->pdev);

	if (!err && vectors < (adap->params.nports + 1)) {
		/* not enough vectors for one qset per port plus slow path */
		pci_disable_msix(adap->pdev);
		err = -1;
	}

	if (!err) {
		for (i = 0; i < vectors; ++i)
			adap->msix_info[i].vec = entries[i].vector;
		adap->msix_nvectors = vectors;
	}

	return err;
}
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};
static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	/* Derive the iSCSI MAC from the port MAC by flipping a bit */
	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}
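	/*
	 * pci_using_dac records whether the 64-bit DMA mask was accepted;
	 * it gates setting NETIF_F_HIGHDMA on the net devices below.
	 */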
	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_GRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}
	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);
	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;
out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);