/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"
#define DRV_VERSION "1.0.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * free resources.
 */
#define MAX_SGE_TIMERVAL 200U
enum {
        MEMWIN0_APERTURE = 65536,
        MEMWIN0_BASE     = 0x30000,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE     = 0x28000,
        MEMWIN2_APERTURE = 2048,
        MEMWIN2_BASE     = 0x1b800,
};
enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16,
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000),  /* PE10K */
        /* ... (remaining device IDs elided) ... */
        { 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
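
/*
 * Illustrative usage (hypothetical command line): "modprobe cxgb4 msi=1"
 * rules out MSI-X so that only MSI and INTx are tried, and "msi=0" forces
 * legacy INTx.  With the default of 2 the driver simply takes the best
 * scheme the platform supports, in the order listed above.
 */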
/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");
static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");
#ifdef CONFIG_PCI_IOV
static bool vf_acls;

module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct dev_addr_list *d;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        const struct port_info *pi = netdev_priv(dev);

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(d, dev) {
                addr[naddr++] = d->dmi_addr;
                if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) {
                        ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}
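
/*
 * Note on the batching above: addresses accumulate in addr[] and are
 * flushed to the exact-match filters ARRAY_SIZE(addr) entries at a time;
 * "free" is true only for the first t4_alloc_mac_filt() call so the old
 * filters are released exactly once.  Whatever the exact filters cannot
 * accommodate spills into the uhash/mhash hash filters, which the final
 * t4_set_addr_hash() call programs in one shot.
 */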
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
                                    sleep_ok);
        return ret;
}
/*
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        /*
         * We do not set address filters and promiscuity here; the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
                            true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, 0, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
        return ret;
}
/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                        /* skip RSS header */
        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq = q->adap->sge.egr_map[qid];

                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
        return 0;
}
/*
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD; we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}
static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
        adap->msix_info[1].desc[n] = 0;

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i) {
                snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
                         adap->name, i);
                adap->msix_info[msi_idx++].desc[n] = 0;
        }
        for_each_rdmarxq(&adap->sge, i) {
                snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
                         adap->name, i);
                adap->msix_info[msi_idx++].desc[n] = 0;
        }
}
static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        return 0;

unwind:
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
}
/*
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for all ports since the mapping
 * table has plenty of entries.
 */
static int setup_rss(struct adapter *adap)
{
        int i, j, err;
        u16 rss[MAX_ETH_QSETS];

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);
                const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++)
                        rss[j] = q[j].rspq.abs_id;

                err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size,
                                          rss, pi->nqsets);
                if (err)
                        return err;
        }
        return 0;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}
/*
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}
/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if the flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        u32 vers;
        const struct fw_hdr *hdr;
        const struct firmware *fw;
        struct device *dev = adap->pdev_dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "unable to load firmware image " FW_FNAME
                        ", error %d\n", ret);
                return ret;
        }

        hdr = (const struct fw_hdr *)fw->data;
        vers = ntohl(hdr->fw_ver);
        if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
                ret = -EINVAL;        /* wrong major version, won't do */
                goto out;
        }

        /*
         * If the flash FW is unusable or we found something newer, load it.
         */
        if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
            vers > adap->params.fw_vers) {
                ret = -t4_load_fw(adap, fw->data, fw->size);
                if (!ret)
                        dev_info(dev, "firmware upgraded to version %pI4 from "
                                 FW_FNAME "\n", &hdr->fw_ver);
        }
out:    release_firmware(fw);
        return ret;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kmalloc(size, GFP_KERNEL);

        if (!p)
                p = vmalloc(size);
        if (p)
                memset(p, 0, size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}

static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}
/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
        /* ... */
        "TxBroadcastFrames ",
        "TxMulticastFrames ",
        /* ... */
        "TxFrames512To1023 ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax ",
        /* ... */
        "RxBroadcastFrames ",
        "RxMulticastFrames ",
        /* ... */
        "RxFrames512To1023 ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax ",
        /* ... */
        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        /* ... */
};
static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T4_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strcpy(info->driver, KBUILD_MODNAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));

        if (!adapter->params.fw_vers)
                strcpy(info->fw_version, "N/A");
        else
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}
/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
};

static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
        }
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return 4 | (ap->params.rev << 10);
}
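
/*
 * Worked example: a T4 (chip version 4) at silicon revision 2 yields
 * 4 | (2 << 10) = 0x804, i.e. version 4 in bits 0..9 and revision 2 in
 * bits 10..15.
 */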
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
                           unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = t4_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        static const unsigned int reg_ranges[] = {
                /* ... (full register range table elided) ... */
        };

        int i;
        struct adapter *ap = netdev2adap(dev);

        regs->version = mk_adap_vers(ap);

        memset(buf, 0, T4_REGMAP_SIZE);
        for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
                reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        t4_restart_aneg(p->adapter, 0, p->tx_chan);
        return 0;
}

static int identify_port(struct net_device *dev, u32 data)
{
        if (data == 0)
                data = 2;        /* default to 2 seconds */

        return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
                                data * 5);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
        unsigned int v = 0;

        if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
                v |= SUPPORTED_TP;
                if (caps & FW_PORT_CAP_SPEED_100M)
                        v |= SUPPORTED_100baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseT_Full;
        } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
                v |= SUPPORTED_Backplane;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseKX_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseKX4_Full;
        } else if (type == FW_PORT_TYPE_KR)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
        else if (type == FW_PORT_TYPE_FIBER)
                v |= SUPPORTED_FIBRE;

        if (caps & FW_PORT_CAP_ANEG)
                v |= SUPPORTED_Autoneg;
        return v;
}
static unsigned int to_fw_linkcaps(unsigned int caps)
{
        unsigned int v = 0;

        if (caps & ADVERTISED_100baseT_Full)
                v |= FW_PORT_CAP_SPEED_100M;
        if (caps & ADVERTISED_1000baseT_Full)
                v |= FW_PORT_CAP_SPEED_1G;
        if (caps & ADVERTISED_10000baseT_Full)
                v |= FW_PORT_CAP_SPEED_10G;
        return v;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        const struct port_info *p = netdev_priv(dev);

        if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
            p->port_type == FW_PORT_TYPE_BT_XAUI)
                cmd->port = PORT_TP;
        else if (p->port_type == FW_PORT_TYPE_FIBER)
                cmd->port = PORT_FIBRE;
        else if (p->port_type == FW_PORT_TYPE_TWINAX)
                cmd->port = PORT_DA;
        else
                cmd->port = PORT_OTHER;

        if (p->mdio_addr >= 0) {
                cmd->phy_address = p->mdio_addr;
                cmd->transceiver = XCVR_EXTERNAL;
                cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
                        MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
        } else {
                cmd->phy_address = 0;  /* not really, but no better option */
                cmd->transceiver = XCVR_INTERNAL;
                cmd->mdio_support = 0;
        }

        cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
        cmd->advertising = from_fw_linkcaps(p->port_type,
                                            p->link_cfg.advertising);
        cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
        cmd->duplex = DUPLEX_FULL;
        cmd->autoneg = p->link_cfg.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
static unsigned int speed_to_caps(int speed)
{
        if (speed == SPEED_100)
                return FW_PORT_CAP_SPEED_100M;
        if (speed == SPEED_1000)
                return FW_PORT_CAP_SPEED_1G;
        if (speed == SPEED_10000)
                return FW_PORT_CAP_SPEED_10G;
        return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        unsigned int cap;
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
                return -EINVAL;

        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
                /*
                 * PHY offers a single speed.  See if that's what's
                 * being requested.
                 */
                if (cmd->autoneg == AUTONEG_DISABLE &&
                    (lc->supported & speed_to_caps(cmd->speed)))
                        return 0;
                return -EINVAL;
        }

        if (cmd->autoneg == AUTONEG_DISABLE) {
                cap = speed_to_caps(cmd->speed);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
                    cmd->speed == SPEED_10000)
                        return -EINVAL;
                lc->requested_speed = cap;
                lc->advertising = 0;
        } else {
                cap = to_fw_linkcaps(cmd->advertising);
                if (!(lc->supported & cap))
                        return -EINVAL;
                lc->requested_speed = 0;
                lc->advertising = cap | FW_PORT_CAP_ANEG;
        }
        lc->autoneg = cmd->autoneg;

        if (netif_running(dev))
                return t4_link_start(p->adapter, 0, p->tx_chan, lc);
        return 0;
}
static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & FW_PORT_CAP_ANEG)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (netif_running(dev))
                return t4_link_start(p->adapter, 0, p->tx_chan, lc);
        return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_offload & RX_CSO;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        if (data)
                p->rx_offload |= RX_CSO;
        else
                p->rx_offload &= ~RX_CSO;
        return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct sge *s = &pi->adapter->sge;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
        e->rx_jumbo_max_pending = 0;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
        e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
        e->rx_jumbo_pending = 0;
        e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < pi->nqsets; ++i) {
                s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
                s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
                s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
        }
        return 0;
}
static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
                               const struct sge_rspq *q)
{
        unsigned int idx = q->intr_params >> 1;

        return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
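
/*
 * As used here, intr_params packs both hold-off controls into one field:
 * bit 0 is the packet-count enable flag (QINTR_CNT_EN) and the bits above
 * it hold the timer index, hence the ">> 1" above.  A timer index of
 * SGE_NTIMERS or more (set_rxq_intr_params() below uses 6) means the
 * timer is disabled.
 */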
/*
 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
 * @adap: the adapter
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count.  At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
                               unsigned int us, unsigned int cnt)
{
        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ(q->cntxt_id);
                        err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
        return 0;
}
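
/*
 * Worked example (hypothetical request): us = 40 and cnt = 0 with the
 * default timer table { 5, 10, 20, 50, 100 } makes closest_timer() pick
 * index 3, since |40 - 50| = 10 beats |40 - 20| = 20, so intr_params
 * becomes QINTR_TIMER_IDX(3) with QINTR_CNT_EN clear.
 */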
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
                        c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;
        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

        c->rx_coalesce_usecs = qtimer_val(adap, rq);
        c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
                adap->sge.counter_val[rq->pktcnt_idx] : 0;
        return 0;
}
/*
 * Translate a physical EEPROM address to virtual.  The first 1K is accessed
 * through virtual addresses starting at 31K, the rest is accessed through
 * virtual addresses starting at 0.  This mapping is correct only for PF0.
 */
static int eeprom_ptov(unsigned int phys_addr)
{
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024;
        return -EINVAL;
}
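
/*
 * Worked examples of the mapping above: physical address 0 maps to
 * virtual 31744 (31K), physical 1023 to virtual 32767, and physical 1024
 * to virtual 0, i.e. the first 1K of physical EEPROM sits at the top of
 * the 32K virtual space and everything else is shifted down by 1K.
 */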
/*
 * The next two routines implement eeprom read/write from physical addresses.
 * The physical->virtual translation is correct only for PF0.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
        int vaddr = eeprom_ptov(phys_addr);

        if (vaddr >= 0)
                vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
        return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
        int vaddr = eeprom_ptov(phys_addr);

        if (vaddr >= 0)
                vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
        return vaddr < 0 ? vaddr : 0;
}
#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i, err = 0;
        struct adapter *adapter = netdev2adap(dev);

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = netdev2adap(dev);

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                /*
                 * RMW possibly needed for first or last words.
                 */
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = eeprom_rd_phys(adapter,
                                             aligned_offset + aligned_len - 4,
                                             (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t4_seeprom_wp(adapter, false);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = eeprom_wr_phys(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t4_seeprom_wp(adapter, true);
out:
        if (buf != data)
                kfree(buf);
        return err;
}
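
/*
 * Worked example of the alignment math above (hypothetical write):
 * offset = 5, len = 6 touches bytes 5..10, so aligned_offset = 4 and
 * aligned_len = (6 + 1 + 3) & ~3 = 8, i.e. two 32-bit words covering
 * bytes 4..11.  The read-modify-write preserves bytes 4 and 11, which
 * belong to those words but not to the caller's data.
 */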
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
        int ret;
        const struct firmware *fw;
        struct adapter *adap = netdev2adap(netdev);

        ef->data[sizeof(ef->data) - 1] = '\0';
        ret = request_firmware(&fw, ef->data, adap->pdev_dev);
        if (ret < 0)
                return ret;

        ret = t4_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);
        if (!ret)
                dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
        return ret;
}
#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = WAKE_BCAST | WAKE_MAGIC;
        wol->wolopts = netdev2adap(dev)->wol;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        int err = 0;
        struct port_info *pi = netdev_priv(dev);

        if (wol->wolopts & ~WOL_SUPPORTED)
                return -EINVAL;
        t4_wol_magic_enable(pi->adapter, pi->tx_chan,
                            (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
        if (wol->wolopts & WAKE_BCAST) {
                err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
                                        ~0ULL, 0, false);
                if (!err)
                        err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
                                                ~6ULL, ~0ULL, BCAST_CRC, true);
        } else
                t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
        return err;
}
static int set_tso(struct net_device *dev, u32 value)
{
        if (value)
                dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
        else
                dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
        return 0;
}
static struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings      = get_settings,
        .set_settings      = set_settings,
        .get_drvinfo       = get_drvinfo,
        .get_msglevel      = get_msglevel,
        .set_msglevel      = set_msglevel,
        .get_ringparam     = get_sge_param,
        .set_ringparam     = set_sge_param,
        .get_coalesce      = get_coalesce,
        .set_coalesce      = set_coalesce,
        .get_eeprom_len    = get_eeprom_len,
        .get_eeprom        = get_eeprom,
        .set_eeprom        = set_eeprom,
        .get_pauseparam    = get_pauseparam,
        .set_pauseparam    = set_pauseparam,
        .get_rx_csum       = get_rx_csum,
        .set_rx_csum       = set_rx_csum,
        .set_tx_csum       = ethtool_op_set_tx_ipv6_csum,
        .set_sg            = ethtool_op_set_sg,
        .get_link          = ethtool_op_get_link,
        .get_strings       = get_strings,
        .phys_id           = identify_port,
        .nway_reset        = restart_autoneg,
        .get_sset_count    = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len      = get_regs_len,
        .get_regs          = get_regs,
        .get_wol           = get_wol,
        .set_wol           = set_wol,
        .set_tso           = set_tso,
        .flash_device      = set_flash,
};
/*
 * debugfs support
 */

static int mem_open(struct inode *inode, struct file *file)
{
        file->private_data = inode->i_private;
        return 0;
}

static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
                        loff_t *ppos)
{
        loff_t pos = *ppos;
        loff_t avail = file->f_path.dentry->d_inode->i_size;
        unsigned int mem = (uintptr_t)file->private_data & 3;
        struct adapter *adap = file->private_data - mem;

        if (pos < 0)
                return -EINVAL;
        if (pos >= avail)
                return 0;
        if (count > avail - pos)
                count = avail - pos;

        while (count) {
                size_t len;
                ssize_t ret, ofst;
                __be32 data[16];

                if (mem == MEM_MC)
                        ret = t4_mc_read(adap, pos, data, NULL);
                else
                        ret = t4_edc_read(adap, mem, pos, data, NULL);
                if (ret)
                        return ret;

                ofst = pos % sizeof(data);
                len = min(count, sizeof(data) - ofst);
                if (copy_to_user(buf, (u8 *)data + ofst, len))
                        return -EFAULT;

                buf += len;
                pos += len;
                count -= len;
        }
        count = pos - *ppos;
        *ppos = pos;
        return count;
}
static const struct file_operations mem_debugfs_fops = {
        .owner = THIS_MODULE,
        .open  = mem_open,
        .read  = mem_read,
};

static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
                                      unsigned int idx, unsigned int size_mb)
{
        struct dentry *de;

        de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
                                 (void *)adap + idx, &mem_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = size_mb << 20;
}
static int __devinit setup_debugfs(struct adapter *adap)
{
        int i;

        if (IS_ERR_OR_NULL(adap->debugfs_root))
                return -1;

        i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
        if (i & EDRAM0_ENABLE)
                add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
        if (i & EDRAM1_ENABLE)
                add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
        if (i & EXT_MEM_ENABLE)
                add_debugfs_mem(adap, "mc", MEM_MC,
                        EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
        if (adap->l2t)
                debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
                                    &t4_l2t_fops);
        return 0;
}
/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
        int atid = -1;

        spin_lock_bh(&t->atid_lock);
        if (t->afree) {
                union aopen_entry *p = t->afree;

                atid = p - t->atid_tab;
                t->afree = p->next;
                p->data = data;
                t->atids_in_use++;
        }
        spin_unlock_bh(&t->atid_lock);
        return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
        union aopen_entry *p = &t->atid_tab[atid];

        spin_lock_bh(&t->atid_lock);
        p->next = t->afree;
        t->afree = p;
        t->atids_in_use--;
        spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
        int stid;

        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET) {
                stid = find_first_zero_bit(t->stid_bmap, t->nstids);
                if (stid < t->nstids)
                        __set_bit(stid, t->stid_bmap);
                else
                        stid = -1;
        } else {
                stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
                if (stid < 0)
                        stid = -1;
        }
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
                stid += t->stid_base;
                t->stids_in_use++;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
/*
 * Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
        stid -= t->stid_base;
        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET)
                __clear_bit(stid, t->stid_bmap);
        else
                bitmap_release_region(t->stid_bmap, stid, 2);
        t->stid_tab[stid].data = NULL;
        t->stids_in_use--;
        spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
                           unsigned int tid)
{
        struct cpl_tid_release *req;

        set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
        req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
                             unsigned int tid)
{
        void **p = &t->tid_tab[tid];
        struct adapter *adap = container_of(t, struct adapter, tids);

        spin_lock_bh(&adap->tid_release_lock);
        *p = adap->tid_release_head;
        /* Low 2 bits encode the Tx channel number */
        adap->tid_release_head = (void **)((uintptr_t)p | chan);
        if (!adap->tid_release_task_busy) {
                adap->tid_release_task_busy = true;
                schedule_work(&adap->tid_release_task);
        }
        spin_unlock_bh(&adap->tid_release_lock);
}
EXPORT_SYMBOL(cxgb4_queue_tid_release);
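
/*
 * The deferred-release list above links requests through the tid_tab
 * slots themselves.  Because those slots hold pointers and are therefore
 * at least 4-byte aligned, the low two bits of each link are guaranteed
 * to be zero and can carry the Tx channel number; the worker strips them
 * back off with "(uintptr_t)p & 3" below.
 */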
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
        struct sk_buff *skb;
        struct adapter *adap;

        adap = container_of(work, struct adapter, tid_release_task);

        spin_lock_bh(&adap->tid_release_lock);
        while (adap->tid_release_head) {
                void **p = adap->tid_release_head;
                unsigned int chan = (uintptr_t)p & 3;
                p = (void *)p - chan;

                adap->tid_release_head = *p;
                *p = NULL;
                spin_unlock_bh(&adap->tid_release_lock);

                while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
                                         GFP_KERNEL)))
                        schedule_timeout_uninterruptible(1);

                mk_tid_release(skb, chan, p - adap->tids.tid_tab);
                t4_ofld_send(adap, skb);
                spin_lock_bh(&adap->tid_release_lock);
        }
        adap->tid_release_task_busy = false;
        spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
        void *old;
        struct sk_buff *skb;
        struct adapter *adap = container_of(t, struct adapter, tids);

        old = t->tid_tab[tid];
        skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
        if (likely(skb)) {
                t->tid_tab[tid] = NULL;
                mk_tid_release(skb, chan, tid);
                t4_ofld_send(adap, skb);
        } else
                cxgb4_queue_tid_release(t, chan, tid);
        if (old)
                atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
        size_t size;
        unsigned int natids = t->natids;

        size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
               t->nstids * sizeof(*t->stid_tab) +
               BITS_TO_LONGS(t->nstids) * sizeof(long);
        t->tid_tab = t4_alloc_mem(size);
        if (!t->tid_tab)
                return -ENOMEM;

        t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
        t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
        t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
        spin_lock_init(&t->stid_lock);
        spin_lock_init(&t->atid_lock);

        t->stids_in_use = 0;
        t->afree = NULL;
        t->atids_in_use = 0;
        atomic_set(&t->tids_in_use, 0);

        /* Setup the free list for atid_tab and clear the stid bitmap. */
        if (natids) {
                while (--natids)
                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
                t->afree = t->atid_tab;
        }
        bitmap_zero(t->stid_bmap, t->nstids);
        return 0;
}
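
/*
 * Layout note: one t4_alloc_mem() call backs all four tables above;
 * atid_tab, stid_tab and stid_bmap are successive carve-outs of the
 * tid_tab allocation, so freeing tid_tab alone releases everything.
 */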
/*
 * cxgb4_create_server - create an IP server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IP address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IP server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
                        __be32 sip, __be16 sport, unsigned int queue)
{
        unsigned int chan;
        struct sk_buff *skb;
        struct adapter *adap;
        struct cpl_pass_open_req *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        adap = netdev2adap(dev);
        req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
        req->local_port = sport;
        req->peer_port = htons(0);
        req->local_ip = sip;
        req->peer_ip = htonl(0);
        chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
        req->opt0 = cpu_to_be64(TX_CHAN(chan));
        req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
                                SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
        return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server);
/*
 * cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
                         const struct in6_addr *sip, __be16 sport,
                         unsigned int queue)
{
        unsigned int chan;
        struct sk_buff *skb;
        struct adapter *adap;
        struct cpl_pass_open_req6 *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        adap = netdev2adap(dev);
        req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
        req->local_port = sport;
        req->peer_port = htons(0);
        req->local_ip_hi = *(__be64 *)(sip->s6_addr);
        req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
        req->peer_ip_hi = cpu_to_be64(0);
        req->peer_ip_lo = cpu_to_be64(0);
        chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
        req->opt0 = cpu_to_be64(TX_CHAN(chan));
        req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
                                SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
        return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server6);
/*
 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 * @mtus: the HW MTU table
 * @mtu: the target MTU
 * @idx: index of selected entry in the MTU table
 *
 * Returns the index and the value in the HW MTU table that is closest to
 * but does not exceed @mtu, unless @mtu is smaller than any value in the
 * table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
                            unsigned int *idx)
{
        unsigned int i = 0;

        while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
                ++i;
        if (idx)
                *idx = i;
        return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
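
/*
 * Worked example (hypothetical table): with mtus[] = { 576, 1492, 1500,
 * 9000, ... } and mtu = 1400, the loop stops at index 0 because
 * mtus[1] = 1492 already exceeds 1400, so 576 is returned; with
 * mtu = 1500 it advances to index 2 and returns 1500.
 */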
/*
 * cxgb4_port_chan - get the HW channel of a port
 * @dev: the net device for the port
 *
 * Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
        return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

/*
 * cxgb4_port_viid - get the VI id of a port
 * @dev: the net device for the port
 *
 * Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
        return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/*
 * cxgb4_port_idx - get the index of a port
 * @dev: the net device for the port
 *
 * Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
        return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
/*
 * cxgb4_netdev_by_hwid - return the net device of a HW port
 * @pdev: identifies the adapter
 * @id: the HW port id
 *
 * Return the net device associated with the interface with the given HW
 * id.
 */
struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
{
        const struct adapter *adap = pci_get_drvdata(pdev);

        if (!adap || id >= NCHAN)
                return NULL;
        id = adap->chan_map[id];
        return id < MAX_NPORTS ? adap->port[id] : NULL;
}
EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6)
{
        struct adapter *adap = pci_get_drvdata(pdev);

        spin_lock(&adap->stats_lock);
        t4_tp_get_tcp_stats(adap, v4, v6);
        spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
                      const unsigned int *pgsz_order)
{
        struct adapter *adap = netdev2adap(dev);

        t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
        t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
                     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
                     HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
        const struct device *parent;
        const struct net_device *netdev = neigh->dev;

        if (netdev->priv_flags & IFF_802_1Q_VLAN)
                netdev = vlan_dev_real_dev(netdev);
        parent = netdev->dev.parent;
        if (parent && parent->driver == &cxgb4_driver.driver)
                t4_l2t_update(dev_get_drvdata(parent), neigh);
}
static int netevent_cb(struct notifier_block *nb, unsigned long event,
                       void *data)
{
        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                check_neigh_update(data);
                break;
        case NETEVENT_PMTU_UPDATE:
        case NETEVENT_REDIRECT:
        default:
                break;
        }
        return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
        .notifier_call = netevent_cb
};
static void uld_attach(struct adapter *adap, unsigned int uld)
{
        void *handle;
        struct cxgb4_lld_info lli;

        lli.pdev = adap->pdev;
        lli.l2t = adap->l2t;
        lli.tids = &adap->tids;
        lli.ports = adap->port;
        lli.vr = &adap->vres;
        lli.mtus = adap->params.mtus;
        if (uld == CXGB4_ULD_RDMA) {
                lli.rxq_ids = adap->sge.rdma_rxq;
                lli.nrxq = adap->sge.rdmaqs;
        } else if (uld == CXGB4_ULD_ISCSI) {
                lli.rxq_ids = adap->sge.ofld_rxq;
                lli.nrxq = adap->sge.ofldqsets;
        }
        lli.ntxq = adap->sge.ofldqsets;
        lli.nchan = adap->params.nports;
        lli.nports = adap->params.nports;
        lli.wr_cred = adap->params.ofldq_wr_cred;
        lli.adapter_type = adap->params.rev;
        lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
        lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
                        t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
        lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
                        t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
        lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
        lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
        lli.fw_vers = adap->params.fw_vers;

        handle = ulds[uld].add(&lli);
        if (IS_ERR(handle)) {
                dev_warn(adap->pdev_dev,
                         "could not attach to the %s driver, error %ld\n",
                         uld_str[uld], PTR_ERR(handle));
                return;
        }

        adap->uld_handle[uld] = handle;

        if (!netevent_registered) {
                register_netevent_notifier(&cxgb4_netevent_nb);
                netevent_registered = true;
        }
}
static void attach_ulds(struct adapter *adap)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        list_add_tail(&adap->list_node, &adapter_list);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
                if (ulds[i].add)
                        uld_attach(adap, i);
        mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        list_del(&adap->list_node);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
                if (adap->uld_handle[i]) {
                        ulds[i].state_change(adap->uld_handle[i],
                                             CXGB4_STATE_DETACH);
                        adap->uld_handle[i] = NULL;
                }
        if (netevent_registered && list_empty(&adapter_list)) {
                unregister_netevent_notifier(&cxgb4_netevent_nb);
                netevent_registered = false;
        }
        mutex_unlock(&uld_mutex);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
                if (adap->uld_handle[i])
                        ulds[i].state_change(adap->uld_handle[i], new_state);
        mutex_unlock(&uld_mutex);
}
/*
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.  Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
        int ret = 0;
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;
        mutex_lock(&uld_mutex);
        if (ulds[type].add) {
                ret = -EBUSY;
                goto out;
        }
        ulds[type] = *p;
        list_for_each_entry(adap, &adapter_list, list_node)
                uld_attach(adap, type);
out:    mutex_unlock(&uld_mutex);
        return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
/*
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;
        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node)
                adap->uld_handle[type] = NULL;
        ulds[type].add = NULL;
        mutex_unlock(&uld_mutex);
        return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
/*
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = setup_sge_queues(adap);
                if (err)
                        goto out;
                err = setup_rss(adap);
                if (err) {
                        t4_free_sge_resources(adap);
                        goto out;
                }
                if (adap->flags & USING_MSIX)
                        name_msix_vecs(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        if (adap->flags & USING_MSIX) {
                err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_queue_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else {
                err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
                                  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
                                  adap->name, adap);
                if (err)
                        goto irq_err;
        }
        enable_rx(adap);
        t4_intr_enable(adap);
        notify_ulds(adap, CXGB4_STATE_UP);
 out:
        return err;
 irq_err:
        dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
        goto out;
}
static void cxgb_down(struct adapter *adapter)
{
        t4_intr_disable(adapter);
        cancel_work_sync(&adapter->tid_release_task);
        adapter->tid_release_task_busy = false;

        if (adapter->flags & USING_MSIX) {
                free_msix_queue_irqs(adapter);
                free_irq(adapter->msix_info[0].vec, adapter);
        } else
                free_irq(adapter->pdev->irq, adapter);
        quiesce_rx(adapter);
}
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        dev->real_num_tx_queues = pi->nqsets;
        set_bit(pi->tx_chan, &adapter->open_device_map);
        link_start(dev);
        netif_tx_start_all_queues(dev);
        return 0;
}
static int cxgb_close(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        netif_tx_stop_all_queues(dev);
        netif_carrier_off(dev);
        ret = t4_enable_vi(adapter, 0, pi->viid, false, false);

        clear_bit(pi->tx_chan, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);
        return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_stats stats;
        struct port_info *p = netdev_priv(dev);
        struct adapter *adapter = p->adapter;
        struct net_device_stats *ns = &dev->stats;

        spin_lock(&adapter->stats_lock);
        t4_get_port_stats(adapter, p->tx_chan, &stats);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes   = stats.tx_octets;
        ns->tx_packets = stats.tx_frames;
        ns->rx_bytes   = stats.rx_octets;
        ns->rx_packets = stats.rx_frames;
        ns->multicast  = stats.rx_mcast_frames;

        /* detailed rx_errors */
        ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
                               stats.rx_runt;
        ns->rx_over_errors   = 0;
        ns->rx_crc_errors    = stats.rx_fcs_err;
        ns->rx_frame_errors  = stats.rx_symbol_err;
        ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
                               stats.rx_ovflow2 + stats.rx_ovflow3 +
                               stats.rx_trunc0 + stats.rx_trunc1 +
                               stats.rx_trunc2 + stats.rx_trunc3;
        ns->rx_missed_errors = 0;

        /* detailed tx_errors */
        ns->tx_aborted_errors   = 0;
        ns->tx_carrier_errors   = 0;
        ns->tx_fifo_errors      = 0;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors    = 0;

        ns->tx_errors = stats.tx_error_frames;
        ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
                ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
        return ns;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        int ret = 0, prtad, devad;
        struct port_info *pi = netdev_priv(dev);
        struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

        switch (cmd) {
        case SIOCGMIIPHY:
                if (pi->mdio_addr < 0)
                        return -EOPNOTSUPP;
                data->phy_id = pi->mdio_addr;
                break;
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                if (mdio_phy_id_is_c45(data->phy_id)) {
                        prtad = mdio_phy_id_prtad(data->phy_id);
                        devad = mdio_phy_id_devad(data->phy_id);
                } else if (data->phy_id < 32) {
                        prtad = data->phy_id;
                        devad = 0;
                        data->reg_num &= 0x1f;
                } else
                        return -EINVAL;

                if (cmd == SIOCGMIIREG)
                        ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
                                         data->reg_num, &data->val_out);
                else
                        ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
                                         data->reg_num, data->val_in);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
        /* unfortunately we can't return errors to the stack */
        set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
                return -EINVAL;
        ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
                            true);
        if (!ret)
                dev->mtu = new_mtu;
        return ret;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
        int ret;
        struct sockaddr *addr = p;
        struct port_info *pi = netdev_priv(dev);

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
                            addr->sa_data, true, true);
        if (ret < 0)
                return ret;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        pi->xact_addr_filt = ret;
        return 0;
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
}
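
/*
 * netpoll entry point.  With MSI-X each of the port's Rx queues has its
 * own vector, so poll them directly; with MSI or INTx fall back to the
 * adapter-wide interrupt handler.
 */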
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_get_stats        = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};
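
/*
 * Stop a dying adapter: disable SGE DMA and all interrupts so the device
 * can do no further harm, then log the condition.  Intended to be called
 * when a fatal hardware error is detected.
 */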
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
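
/*
 * Program the chip's three PCIe memory windows.  Each window maps an
 * aperture at a fixed offset from BAR0; the WINDOW field encodes the
 * aperture size as log2(size in bytes) - 10, i.e. in 1KB units.
 */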
static void setup_memwin(struct adapter *adap)
{
	u32 bar0;

	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
		     (bar0 + MEMWIN0_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
		     (bar0 + MEMWIN1_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
		     (bar0 + MEMWIN2_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd c;

	ret = t4_check_fw_version(adap);
	if (ret == -EINVAL || ret > 0) {
		if (upgrade_fw(adap) >= 0)	/* recache FW version */
			ret = t4_check_fw_version(adap);
	}
	if (ret < 0)
		return ret;

	/* contact FW, request master */
	ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}

	/* reset device */
	ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
	if (ret < 0)
		goto bye;

	/* get device capabilities */
	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_READ);
	c.retval_len16 = htonl(FW_LEN16(c));
	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
	if (ret < 0)
		goto bye;

	/* select capabilities we'll be using */
	if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		goto bye;
	}
	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
	if (ret < 0)
		goto bye;

	ret = t4_config_glbl_rss(adap, 0,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		goto bye;

	ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
			  FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		goto bye;

	for (v = 0; v < SGE_NTIMERS - 1; v++)
		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	adap->sge.counter_val[0] = 1;
	for (v = 1; v < SGE_NCOUNTERS; v++)
		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
					       THRESHOLD_3_MASK);

	/* get basic stuff going */
	ret = t4_early_init(adap, 0);
	if (ret < 0)
		goto bye;
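
	/*
	 * Firmware parameters are addressed by a mnemonic (device-wide vs.
	 * per-PF/VF) plus a parameter index; the two helpers below build the
	 * 32-bit identifiers consumed by t4_query_params().
	 */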
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

	params[0] = FW_PARAM_DEV(PORTVEC);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
	if (ret < 0)
		goto bye;
	port_vec = val[0];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;

	if (c.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];
		adap->params.offload = 1;
	}

	if (c.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;
	}

	if (c.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;
	adap->flags |= FW_OK;

	/* These are finalized by FW initialization, load their values now */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
	setup_memwin(adap);
	return 0;

	/*
	 * If a command timed out or failed with EIO FW does not operate within
	 * its spec or something catastrophic happened to HW/FW, stop issuing
	 * commands.
	 */
bye:	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, 0);
	return ret;
}

static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
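
/*
 * Set an ingress queue's interrupt coalescing parameters: hold-off timer
 * index, optional packet-count threshold, number of entries, and entry
 * size.
 */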
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void __devinit cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > num_online_cpus())
		q10g = num_online_cpus();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;	/* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void __devinit reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2
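
/*
 * Request MSI-X vectors: "want" is the ideal count (one per Ethernet,
 * offload, and RDMA queue plus EXTRA_VECS), "need" the bare minimum.  If
 * the kernel offers fewer than "want" vectors, retry with the offered
 * count and shrink the queue sets to fit.
 */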
static int __devinit enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;	/* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

static void __devinit print_port_info(struct adapter *adap)
{
	static const char *base[] = {
		"R", "KX4", "T", "KX", "T", "KR", "CX4"
	};

	int i;
	char buf[80];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);
		char *bufp = buf;

		if (!test_bit(i, &adap->registered_device_map))
			continue;

		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
			bufp += sprintf(bufp, "100/");
		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
			bufp += sprintf(bufp, "1000/");
		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
			bufp += sprintf(bufp, "10G/");
		if (bufp != buf)
			--bufp;
		sprintf(bufp, "BASE-%s", base[pi->port_type]);

		netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
			    adap->params.vpd.id, adap->params.rev,
			    buf, is_offload(adap) ? "R" : "",
			    adap->params.pci.width,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name)
			netdev_info(dev, "S/N: %s, E/C: %s\n",
				    adap->params.vpd.sn, adap->params.vpd.ec);
	}
}

#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	unsigned int highdma = 0;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through PF 0 */
	func = PCI_FUNC(pdev->devfn);
	if (func > 0)
		goto sriov;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar;
	err = adap_init0(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->rx_offload = RX_CSO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;

		netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		netdev->features |= NETIF_F_GRO | highdma;
		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, 0, 0, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
			adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
		}
	}

	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	if (is_offload(adapter))
		attach_ulds(adapter);

	print_port_info(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	t4_free_mem(adapter->tids.tid_tab);
	t4_free_mem(adapter->l2t);
	for_each_port(adapter, i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, 0);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
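
/*
 * Tear-down is the mirror image of init_one.  A NULL drvdata means
 * init_one bailed out early (e.g. on a function other than PF 0), in
 * which case only the PCI regions are still held.
 */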
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		t4_sge_stop(adapter);
		t4_free_sge_resources(adapter);
		t4_free_mem(adapter->l2t);
		t4_free_mem(adapter->tids.tid_tab);
		disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		if (adapter->flags & FW_OK)
			t4_fw_bye(adapter, 0);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else if (PCI_FUNC(pdev->devfn) > 0)
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);