/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
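/*
 * The message-enable bitmap is built from the standard NETIF_MSG_* flags
 * and can also be changed per interface at runtime through the driver's
 * ethtool msglevel hooks (get_msglevel/set_msglevel below), e.g.
 * "ethtool -s eth0 msglvl 0x7fff"; the interface name and value are
 * examples only.
 */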
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
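/*
 * Example invocation (illustrative only): load the module allowing MSI and
 * pin interrupts but not MSI-X, with offload disabled at init time:
 *
 *	modprobe cxgb3 msi=1 ofld_disable=1
 */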
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000: s = "1000Mbps"; break;
		case SPEED_100: s = "100Mbps"; break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
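/*
 * The next two helpers deal with frames stuck in the TX FIFO while a link
 * fault is present: setting F_ENDROPPKT in the MAC's TXFIFO_CFG register
 * makes the MAC drain (discard) queued TX frames, and clearing it restores
 * normal transmission.  This summary is inferred from the register names
 * and the call sites below, not from hardware documentation.
 */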
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS + pi->mac.offset);
		t3_write_reg(adap, A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);
		t3_set_reg_field(adap, A_XGM_INT_ENABLE + pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the reporting PHY
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

/*
 * Write dummy SMT, L2T, and routing entries plus one TCB field so the TP
 * recomputes parity over its on-chip tables; used on rev-C and later
 * parts (see the T3_REV_C check in cxgb_up()).
 */
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
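/*
 * Worked example (hypothetical sizes): with nq0 = nq1 = 2, the first half
 * of rspq_map alternates 0,1,0,1,... (port 0's queue sets) and the second
 * half alternates 2,3,2,3,... (port 1's queue sets, offset by nq0), so the
 * Toeplitz hash spreads flows evenly across each port's response queues.
 */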
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	The device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;	/* cannot change parameters when up */
	if (val && adap->params.rev == 0)
		return -EINVAL;	/* T3A supports only 0 */
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;	/* cannot change parameters when up */
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
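/*
 * These attributes are registered on the port's net device, so they are
 * typically visible as, e.g., /sys/class/net/eth0/cam_size (read-only)
 * and /sys/class/net/eth0/nfilters and nservers (root-writable while the
 * adapter is down).  The exact path depends on the interface name; the
 * one shown is an example.
 */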
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;

	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}

	return len;
}
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;

	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
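/*
 * The sched0..sched7 attributes expose the eight TX HW schedulers.  They
 * are created on the offload net device when offload is activated (see
 * offload_open() below).  Reading one reports the configured rate, e.g.
 * "disabled" or "100000 Kbps"; writing a value reconfigures the scheduler
 * via t3_config_sched(), e.g. "echo 100000 > /sys/class/net/eth0/sched0"
 * (example path; the read side reports Kbps, and the write side appears
 * to use the same units as far as this file shows).
 */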
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}
#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
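/*
 * With the version macros stringified, the firmware files requested at
 * runtime look like, for example, cxgb3/t3fw-7.12.0.bin and
 * cxgb3/t3c_psram-1.1.0.bin.  The exact numbers come from FW_VERSION_*
 * and TP_VERSION_* in the shared headers, so the names above are only
 * illustrative.
 */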
static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	char buf[64];
	u32 csum;
	const __be32 *p;
	int i, ret;
	u16 *cache = phy->phy_cache;

	snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));

	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}

	/* check size, take checksum in account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4 ; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}
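/*
 * Note on the EDC image format handled above: the file is a stream of
 * 32-bit big-endian words whose sum must come to 0xffffffff, and each
 * word is split into two 16-bit halves when copied into the PHY cache.
 */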
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	t3_sge_stop(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_scheduled_work();

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	dev->real_num_tx_queues = pi->nqsets;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

	"LinkFaults         ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);

	/* the Lro* string slots: these counters are always 0 here */
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
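/*
 * The register dump above is what userspace retrieves with
 * "ethtool -d ethX" (example invocation); regs->version lets the decoder
 * distinguish chip version/revision and PCIe vs PCI-X cards.
 */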
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & T3_RX_CSUM;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data) {
		p->rx_offload |= T3_RX_CSUM;
	} else {
		int i;

		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			set_qset_lro(dev, i, 0);
	}
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
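/*
 * These ring sizes are the ones exposed through the standard ethtool
 * ringparam interface, e.g. "ethtool -G eth0 rx 2048 tx 2048" (example
 * values); changes are rejected with -EBUSY once the adapter is fully
 * initialized.
 */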
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
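/*
 * Interrupt coalescing maps to the SGE "NEWTIMER" mechanism; the usual
 * way to drive it is "ethtool -C eth0 rx-usecs 50" (example values).
 * Note that only the first queue set's value is changed and reported here.
 */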
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) &buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	/*
	 * EEPROM writes are word-sized: for an unaligned request, read the
	 * surrounding words, merge the user data, and write back the whole
	 * aligned range.
	 */
	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) &buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
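/*
 * In the extension ioctls below, negative values in user-supplied
 * parameters mean "leave this setting unchanged"; in_range() therefore
 * accepts any negative value, and the handlers apply only fields that
 * are >= 0.
 */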
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !(pi->rx_offload & T3_RX_CSUM))
					return -EINVAL;
			}

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
						qset[i];
					q->polling = t.polling;
				}
			}
		}
		if (t.lro >= 0)
			set_qset_lro(dev, t.qset_idx, t.lro);

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
		    (fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match, t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match, t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}

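/*
 * For reference: userspace reaches cxgb_extension_ioctl() through the
 * SIOCCHIOCTL private ioctl, with ifr_data pointing at a ch_* structure
 * whose leading 32-bit word selects the command.  A minimal, illustrative
 * sketch (modelled loosely on how a tool such as cxgbtool does it;
 * "sock_fd" is any socket descriptor, and this is not compiled as part of
 * the driver):
 *
 *	struct ch_reg op = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (void *)&op;
 *	if (ioctl(sock_fd, SIOCCHIOCTL, &ifr) == 0)
 *		printf("%u queue sets\n", op.val);
 */
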
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}

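/*
 * The empty lock/unlock pairs in t3_synchronize_rx() are deliberate: once a
 * response queue's lock has been taken and released, any Rx processing that
 * was in progress under that lock has finished, which is all the
 * synchronization callers need.
 */
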
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;
		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

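/*
 * Rev-0 (T3A) parts have a single global VLAN acceleration control rather
 * than a per-port one, which is why the else-branch above leaves extraction
 * enabled while any port still has a vlan_group.  The final
 * t3_synchronize_rx() ensures no Rx path still holds the old vlan_grp
 * pointer when we return.
 */
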
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif

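/*
 * Note that cxgb_netpoll() simply invokes the handler the interrupt path
 * would use: t3_intr_handler() selects the polling or non-polling variant,
 * and the cookie is the queue set under MSI-X or the adapter otherwise,
 * mirroring how the IRQs were requested.
 */
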
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}

static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}

static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMAC's to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

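/*
 * The free-list walk above relies on the layout of A_SG_RSPQ_FL_STATUS:
 * from S_FL0EMPTY upward there is one "empty" bit per free list, and each
 * queue set owns two consecutive free lists; hence the i ^= 1 toggle, with
 * qs advanced on every second bit as v is shifted right.
 */
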
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

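/*
 * The F_T3DBG bit carries the PHY interrupt: t3_os_ext_intr_handler() masks
 * it out of slow_intr_mask before queueing ext_intr_task(), and the task
 * adds it back (after clearing the cause) once the slow MDIO work is done,
 * so the interrupt cannot storm while the task is running.
 */
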
void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}

static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}

static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	pci_save_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

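/*
 * Together these callbacks implement the standard PCI error-recovery
 * handshake: the core reports the error through .error_detected (we quiesce
 * and request a slot reset), invokes .slot_reset after the bus reset (we
 * re-enable the device and replay the chip setup), and finally calls
 * .resume, where t3_resume_ports() brings the interfaces back up.
 */
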
/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}

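/*
 * A worked example of the sizing above, assuming SGE_QSETS is 8: a
 * dual-port adapter that obtained 9 MSI-X vectors starts from nqsets = 8;
 * 2 * 8 exceeds SGE_QSETS, so the count is halved to 4 per port, then
 * clamped to the number of online CPUs.  On a machine with at least four
 * CPUs, port 0 gets qsets 0-3 and port 1 gets qsets 4-7.
 */
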
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i, err;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0)
		pci_disable_msix(adap->pdev);

	if (!err && vectors < (adap->params.nports + 1)) {
		pci_disable_msix(adap->pdev);
		err = -1;
	}

	if (!err) {
		for (i = 0; i < vectors; ++i)
			adap->msix_info[i].vec = entries[i].vector;
		adap->msix_nvectors = vectors;
	}

	return err;
}

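/*
 * The retry loop above follows the old pci_enable_msix() contract: a
 * positive return value is the number of vectors that could have been
 * allocated, so the request is retried with that count until it either
 * succeeds (0) or fails outright (< 0).  Even then the allocation is only
 * accepted if it provides at least one vector per port plus one for the
 * slow path.
 */
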
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};

static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_GRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

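/*
 * The labels above unwind init_one() in reverse allocation order: register
 * mapping and net devices first, then the adapter structure, then the PCI
 * enable, and finally the PCI regions, so each failure point jumps to the
 * label matching whatever has been set up so far.
 */
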
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);