/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"
#define EFX_MAX_MTU (9 * 1024)
/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per cpu
 * workqueue, there is nothing to be gained in making it per NIC
 */
static struct workqueue_struct *refill_workqueue;
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;
/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/
/*
 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
 *
 * This sets the default for new devices. It can be controlled later
 * using ethtool.
 */
static int lro = true;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");
/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us to
 * apply a higher level of interrupt moderation to TX events.
 *
 * This is forced to 0 for MSI interrupt mode as the interrupt vector
 * is not written
 */
static unsigned int separate_tx_and_rx_channels = true;
/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;
/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;
/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;
/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;
/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;
/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 * in the system.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
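/* Example (hypothetical values): loading the module with
 * "modprobe sfc rss_cpus=4 lro=0" would spread RX interrupts across
 * four CPUs and disable LRO by default. */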
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if (efx->state == STATE_RUNNING)	\
			ASSERT_RTNL();			\
	} while (0)
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/
/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);
	if (rx_packets == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_flush_lro(channel);
	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}
/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen.  Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct net_device *napi_dev = channel->napi_dev;
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		/* There is no race here; although napi_disable() will
		 * only wait for netif_rx_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled. */
		netif_rx_complete(napi_dev, napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, efx->type->evq_size);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}
/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	falcon_init_eventq(channel);
}
static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}
static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/
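/* Allocate a channel's resources: its event queue and any TX and RX
 * queues assigned to it */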
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}
/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	if (!(channel->efx->net_dev->flags & IFF_UP))
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it.  Make sure it's cleared before
	 * then.  Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}
/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}
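/* Shut down all event queues and all TX and RX queues, flushing the
 * hardware DMA queues first; the port must already be disabled. */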
static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}
static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}
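/* Called when an RX buffer allocation fails in the fast path; retries
 * the refill on the shared slow-fill workqueue after 'delay' jiffies. */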
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}
/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/
/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status; carrier-off also keeps
 * the port's TX queue stopped while the link is down.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up) {
		struct mii_if_info *gmii = &efx->mii;
		unsigned adv, lpa;

		/* NONE here means direct XAUI from the controller, with no
		 * MDIO-attached device we can query. */
		if (efx->phy_type != PHY_TYPE_NONE) {
			adv = gmii_advertised(gmii);
			lpa = gmii_lpa(gmii);
		} else {
			lpa = GM_LPA_10000 | LPA_DUPLEX;
			adv = lpa;
		}
		EFX_INFO(efx, "link up at %dMbps %s-duplex "
			 "(adv %04x lpa %04x) (MTU %d)%s\n",
			 (efx->link_options & GM_LPA_10000 ? 10000 :
			  (efx->link_options & GM_LPA_1000 ? 1000 :
			   (efx->link_options & GM_LPA_100 ? 100 :
			    10))), adv, lpa,
			 (efx->link_options & GM_LPA_DUPLEX ?
			  "full" : "half"),
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}
/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	falcon_reconfigure_xmac(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);
}
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}
/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_reconfigure_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   reconfigure_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}
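/* Set up the MAC/PHY operations table, read the MAC address and
 * validate it, falling back to a random address if permitted. */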
static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		EFX_ERR(efx, "invalid MAC address %pM\n",
			efx->mac_address);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %pM\n",
			 efx->net_dev->dev_addr);
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}
static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	/* Initialise the MAC and PHY */
	rc = falcon_init_xmac(efx);
	if (rc)
		return rc;

	efx->port_initialized = true;
	efx->stats_enabled = true;

	/* Reconfigure port to program MAC registers */
	falcon_reconfigure_xmac(efx);

	return 0;
}
/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_reconfigure_port() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}
/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
 * efx_reconfigure_work can still be scheduled via NAPI processing
 * until efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}
static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	falcon_fini_xmac(efx);
	efx->port_initialized = false;

	efx->link_up = false;
	efx_link_status_changed(efx);
}
static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}
/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask.  Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev,
					       efx->type->mem_bar);
	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
			efx->type->mem_bar,
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, efx->type->mem_bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}
static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}
/* Get number of RX queues wanted.  Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_t core_mask;
	int count;
	int cpu;

	cpus_clear(core_mask);
	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpu_isset(cpu, core_mask)) {
			++count;
			cpus_or(core_mask, core_mask,
				topology_core_siblings(cpu));
		}
	}

	return count;
}
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		efx->n_rx_queues = min(wanted_ints, max_channels);

		for (i = 0; i < efx->n_rx_queues; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
		if (rc > 0) {
			EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
			efx->n_rx_queues = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     efx->n_rx_queues);
		}

		if (rc == 0) {
			for (i = 0; i < efx->n_rx_queues; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}
}
static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
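/* Map each TX and RX queue to a channel: TX queues share channel 1 when
 * separate TX/RX channels are in use (and the interrupt mode allows it),
 * otherwise channel 0; RX queue i is serviced by channel i. */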
static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
			tx_queue->channel = &efx->channel[1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}
static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);

	return 0;
}
static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}
/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/
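/* Allocate all NIC resources in dependency order: the hardware, then
 * the port, then the per-channel queues. */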
static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	if (efx_dev_registered(efx))
		efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}
/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->reconfigure_work);
}
/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose phy events */
	efx_stop_port(efx);

	/* Flush reconfigure_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		efx_stop_queue(efx);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}
static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}
/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	efx_init_channels(efx);

	efx_start_all(efx);
}
/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}
/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/
/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock)) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
		return;
	}

	if (efx->port_enabled)
		rc = falcon_check_xmac(efx);
	mutex_unlock(&efx->mac_lock);

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}
/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/
/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}
/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/
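/* Attach each channel's NAPI context to the net device and set up its
 * LRO state; on failure, unwind whatever was initialised. */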
static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		rc = efx_lro_init(&channel->lro_mgr, efx);
		if (rc)
			goto err;
	}
	return 0;

 err:
	efx_fini_napi(efx);
	return rc;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_lro_fini(&channel->lro_mgr);
		channel->napi_dev = NULL;
	}
}
/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/
#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/
/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	efx_start_all(efx);
	return 0;
}
/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);
	efx_fini_channels(efx);
	efx_init_channels(efx);

	return 0;
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them (or resetting the NIC); slightly stale
	 * stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (efx->stats_enabled) {
		falcon_update_stats_xmac(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
		" resetting channels\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	efx_init_channels(efx);

	efx_start_all(efx);
	return 0;
}
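/* Context: process, rtnl_lock() held. */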
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
			new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
	bool changed = (efx->promiscuous != promiscuous);
	u32 crc;
	int bit;
	int i;

	efx->promiscuous = promiscuous;

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}

	if (!efx->port_enabled)
		/* Delay pushing settings until efx_start_port() */
		return;

	if (changed)
		queue_work(efx->workqueue, &efx->reconfigure_work);

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}
static const struct net_device_ops efx_netdev_ops = {
	.ndo_open = efx_net_open,
	.ndo_stop = efx_net_stop,
	.ndo_get_stats = efx_net_stats,
	.ndo_tx_timeout = efx_watchdog,
	.ndo_start_xmit = efx_hard_start_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = efx_ioctl,
	.ndo_change_mtu = efx_change_mtu,
	.ndo_set_mac_address = efx_set_mac_address,
	.ndo_set_multicast_list = efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = efx_netpoll,
#endif
};
static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME) {
		struct efx_nic *efx = netdev_priv(net_dev);

		strcpy(efx->name, net_dev->name);
		efx_mtd_rename(efx);
	}

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	/* Clear MAC statistics */
	falcon_update_stats_xmac(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rc = register_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "could not register net dev\n");
		return rc;
	}
	strcpy(efx->name, net_dev->name);

	return 0;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		unregister_netdev(efx->net_dev);
	}
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/
/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* The net_dev->get_stats handler is quite slow, and will fail
	 * if a fetch is pending over reset. Serialise against it. */
	spin_lock(&efx->stats_lock);
	efx->stats_enabled = false;
	spin_unlock(&efx->stats_lock);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);
	mutex_lock(&efx->spi_lock);

	rc = falcon_xmac_get_settings(efx, ecmd);
	if (rc)
		EFX_ERR(efx, "could not back up PHY settings\n");

	efx_fini_channels(efx);
}
/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		ok = false;
	}

	if (ok) {
		efx_init_channels(efx);

		if (falcon_xmac_set_settings(efx, ecmd))
			EFX_ERR(efx, "could not restore PHY settings\n");
	}

	mutex_unlock(&efx->spi_lock);
	mutex_unlock(&efx->mac_lock);

	if (ok) {
		efx_start_all(efx);
		efx->stats_enabled = true;
	}
	return rc;
}
/* Reset the NIC as transparently as possible. Do not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep.  You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto unlock_rtnl;
	}

	EFX_INFO(efx, "resetting (%d)\n", method);

	efx_reset_down(efx, &ecmd);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto fail;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		rc = -EIO;
		goto fail;
	}

	rc = efx_reset_up(efx, &ecmd, true);
	if (rc)
		goto disable;

	EFX_LOG(efx, "reset complete\n");
 unlock_rtnl:
	rtnl_unlock();
	return 0;

 fail:
	efx_reset_up(efx, &ecmd, false);
 disable:
	EFX_ERR(efx, "has been disabled\n");
	efx->state = STATE_DISABLED;

	rtnl_unlock();
	efx_unregister_netdev(efx);
	efx_fini_port(efx);
	return rc;
}
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
	else
		EFX_LOG(efx, "scheduling reset (%d)\n", method);

	efx->reset_pending = method;

	queue_work(reset_workqueue, &efx->reset_work);
}
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/
/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}			/* end of list */
};
/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
static struct efx_phy_operations efx_dummy_phy_operations = {
	.init = efx_port_dummy_op_int,
	.reconfigure = efx_port_dummy_op_void,
	.check_hw = efx_port_dummy_op_int,
	.fini = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
};
static struct efx_board efx_dummy_board_info = {
	.init = efx_port_dummy_op_int,
	.init_leds = efx_port_dummy_op_int,
	.set_fault_led = efx_port_dummy_op_blink,
	.monitor = efx_port_dummy_op_int,
	.blink = efx_port_dummy_op_blink,
	.fini = efx_port_dummy_op_void,
};
/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/
/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	mutex_init(&efx->spi_lock);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	efx->board_info = efx_dummy_board_info;

	efx->net_dev = net_dev;
	efx->rx_checksum_enabled = true;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mii.dev = net_dev;
	INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->work_pending = false;
	}
	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
		tx_queue->tso_headers_free = NULL;
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = type;

	/* Sanity-check NIC type */
	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
			    (efx->type->txd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
			    (efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->evq_size &
			    (efx->type->evq_size - 1));
	/* As close as we can get to guaranteeing that we don't overflow */
	EFX_BUG_ON_PARANOID(efx->type->evq_size <
			    (efx->type->txd_ring_mask + 1 +
			     efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	efx->workqueue = create_singlethread_workqueue("sfc_work");
	if (!efx->workqueue)
		return -ENOMEM;

	return 0;
}
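/* Free resources allocated by efx_init_struct() */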
static void efx_fini_struct(struct efx_nic *efx)
{
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/
/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Skip everything if we never obtained a valid membase */
	if (!efx->membase)
		return;

	efx_fini_channels(efx);
	efx_fini_port(efx);

	/* Shutdown the board, then the NIC and board state */
	efx->board_info.fini(efx);
	falcon_fini_interrupt(efx);

	efx_fini_napi(efx);
	efx_remove_all(efx);
}
/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	efx_mtd_remove(efx);

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	if (efx->membase == NULL)
		goto out;

	efx_unregister_netdev(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net_device's have been removed. */
	cancel_work_sync(&efx->reset_work);

	efx_pci_remove_main(efx);

 out:
	efx_fini_io(efx);
	EFX_LOG(efx, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
}
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	rc = efx_init_napi(efx);
	if (rc)
		goto fail2;

	/* Initialise the board */
	rc = efx->board_info.init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail3;
	}

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail4;
	}

	rc = efx_init_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise port\n");
		goto fail5;
	}

	efx_init_channels(efx);

	rc = falcon_init_interrupt(efx);
	if (rc)
		goto fail6;

	return 0;

 fail6:
	efx_fini_channels(efx);
	efx_fini_port(efx);
 fail5:
 fail4:
	efx->board_info.fini(efx);
 fail3:
	efx_fini_napi(efx);
 fail2:
	efx_remove_all(efx);
 fail1:
	return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int i, rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev(sizeof(*efx));
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO);
	if (lro)
		net_dev->features |= NETIF_F_LRO;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_TSO);
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	EFX_INFO(efx, "Solarflare Communications NIC detected\n");

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	/* No serialisation is required with the reset path because
	 * we're in STATE_INIT. */
	for (i = 0; i < 5; i++) {
		rc = efx_pci_probe_main(efx);
		if (rc == 0)
			break;

		/* Serialise against efx_reset(). No more resets will be
		 * scheduled since efx_stop_all() has been called, and we
		 * have not and never have been registered with either
		 * the rtnetlink or driverlink layers. */
		cancel_work_sync(&efx->reset_work);

		/* Retry if a recoverably reset event has been scheduled */
		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
		    (efx->reset_pending != RESET_TYPE_ALL))
			goto fail3;

		efx->reset_pending = RESET_TYPE_NONE;
	}

	if (rc) {
		EFX_ERR(efx, "Could not reset NIC\n");
		goto fail4;
	}

	/* Switch to the running state before we expose the device to
	 * the OS.  This is to ensure that the initial gathering of
	 * MAC stats succeeds. */
	efx->state = STATE_RUNNING;

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail5;

	EFX_LOG(efx, "initialisation successful\n");

	efx_mtd_probe(efx); /* allowed to fail */
	return 0;

 fail5:
	efx_pci_remove_main(efx);
 fail4:
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
static struct pci_driver efx_pci_driver = {
	.name = EFX_DRIVER_NAME,
	.id_table = efx_pci_table,
	.probe = efx_pci_probe,
	.remove = efx_pci_remove,
};
/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/
module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	refill_workqueue = create_workqueue("sfc_refill");
	if (!refill_workqueue) {
		rc = -ENOMEM;
		goto err_refill;
	}
	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
	destroy_workqueue(refill_workqueue);
 err_refill:
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}
static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	destroy_workqueue(refill_workqueue);
	unregister_netdevice_notifier(&efx_netdev_notifier);
}
module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
	      "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);