/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/tcp.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"
/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */
/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_NETWORK]	= "NETWORK",
};
/* Interrupt mode names (see INT_MODE()) */
const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
const char *efx_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX]   = "MSI-X",
	[EFX_INT_MODE_MSI]    = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]     = "INVISIBLE",
	[RESET_TYPE_ALL]           = "ALL",
	[RESET_TYPE_WORLD]         = "WORLD",
	[RESET_TYPE_DISABLE]       = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]   = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]     = "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]   = "RX_RECOVERY",
	[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
	[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
	[RESET_TYPE_TX_SKIP]       = "TX_SKIP",
};
#define EFX_MAX_MTU (9 * 1024)
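/* (9 * 1024 = 9216 bytes, comfortably above the common 9000-byte jumbo
 * frame payload plus the Ethernet header and FCS.) */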
/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
 * workqueue, there is nothing to be gained in making it per NIC.
 */
static struct workqueue_struct *refill_workqueue;
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-NIC work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;
/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/
/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0644);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");
/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;
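/* 64 is the conventional NAPI weight: each call to efx_poll() may
 * process at most this many received packets before yielding the CPU
 * back to the softirq scheduler. */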
/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;
/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;
/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;
/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
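/* (That is: 1024 / 2 = 512 descriptors must drain before the queue
 * restarts; at worst one packet per 3 descriptors and roughly 1.2 us
 * of wire time per packet on 10G, 512 / 3 * 1.2 ~= 205 us, so a 150 us
 * moderation delay still lets the interrupt arrive before the queue
 * runs dry.) */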
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;
/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_RUNNING) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/
/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);
	if (rx_packets == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}
/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen.  Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		struct efx_nic *efx = channel->efx;

		if (channel->used_flags & EFX_USED_BY_RX &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					falcon_set_int_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					falcon_set_int_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, EFX_EVQ_SIZE);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}
/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	falcon_init_eventq(channel);
}
static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}
static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}
static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;
	const char *type = "";
	int number;

	efx_for_each_channel(channel, efx) {
		number = channel->channel;
		if (efx->n_channels > efx->n_rx_queues) {
			if (channel->channel < efx->n_rx_queues) {
				type = "-rx";
			} else {
				type = "-tx";
				number -= efx->n_rx_queues;
			}
		}
		snprintf(channel->name, sizeof(channel->name),
			 "%s%s-%d", efx->name, type, number);
	}
}
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}
/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it.  Make sure it's cleared before
	 * then.  Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}
/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}
static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}
static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}
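/* (The delay argument is in jiffies, as queue_delayed_work() expects.) */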
/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/
/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up) {
		EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
			 link_state->speed, link_state->fd ? "full" : "half",
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}
static void efx_fini_port(struct efx_nic *efx);
/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	falcon_deconfigure_mac_wrapper(efx);

	/* Reconfigure the PHY, disabling transmit in mac level loopback. */
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
	efx->phy_op->reconfigure(efx);

	if (falcon_switch_mac(efx))
		goto fail;

	efx->mac_op->reconfigure(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);

	return;

fail:
	EFX_ERR(efx, "failed to reconfigure MAC\n");
	efx->port_enabled = false;
}
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}
/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_phy_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		EFX_ERR(efx, "invalid MAC address %pM\n",
			efx->mac_address);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %pM\n",
			 efx->net_dev->dev_addr);
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}
static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	rc = efx->phy_op->init(efx);
	if (rc)
		return rc;
	mutex_lock(&efx->mac_lock);
	efx->phy_op->reconfigure(efx);
	rc = falcon_switch_mac(efx);
	mutex_unlock(&efx->mac_lock);
	if (rc)
		goto fail;
	efx->mac_op->reconfigure(efx);

	efx->port_initialized = true;
	efx_stats_enable(efx);
	return 0;

fail:
	efx->phy_op->fini(efx);
	return rc;
}
/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_phy_work()/efx_mac_work() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}
/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing,
 * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
 * and efx_mac_work may still be scheduled via NAPI processing until
 * efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}
static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx_stats_disable(efx);
	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}
static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}
/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask.  Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
		(unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}
static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}
/* Get number of RX queues wanted.  Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_var_t core_mask;
	int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
		printk(KERN_WARNING
		       "sfc: RSS disabled due to allocation failure\n");
		return 1;
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, core_mask)) {
			++count;
			cpumask_or(core_mask, core_mask,
				   topology_core_cpumask(cpu));
		}
	}

	free_cpumask_var(core_mask);
	return count;
}
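/* For example, on a two-socket system with four cores per package and
 * no rss_cpus override, the first CPU seen in each package adds all of
 * its package siblings to core_mask, so the loop counts two packages
 * and the driver asks for two RX queues. */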
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;
		int rx_queues;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
		wanted_ints = min(wanted_ints, max_channels);

		for (i = 0; i < wanted_ints; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
		if (rc > 0) {
			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
				" available (%d < %d).\n", rc, wanted_ints);
			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= wanted_ints);
			wanted_ints = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     wanted_ints);
		}

		if (rc == 0) {
			efx->n_rx_queues = min(rx_queues, wanted_ints);
			efx->n_channels = wanted_ints;
			for (i = 0; i < wanted_ints; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->legacy_irq = efx->pci_dev->irq;
	}
}
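/* The ladder above only ever falls back to a less capable mode:
 * MSI-X degrades to single-channel MSI, and MSI degrades to the
 * legacy INTx line. */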
static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (separate_tx_channels)
			tx_queue->channel = &efx->channel[efx->n_channels-1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}
static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);

	return 0;
}
static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}
/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/
static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}
	efx_set_channel_names(efx);

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	if (efx_dev_registered(efx))
		efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}
/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
	cancel_work_sync(&efx->phy_work);
}
/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose phy events */
	efx_stop_port(efx);

	/* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_deconfigure_mac_wrapper(efx);
	msleep(10); /* Let the Rx FIFO drain */
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		efx_stop_queue(efx);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}
static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}
/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	efx_init_channels(efx);

	efx_start_all(efx);
}
/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
static unsigned irq_mod_ticks(int usecs, int resolution)
{
	if (usecs <= 0)
		return 0; /* cannot receive interrupts ahead of time :-) */
	if (usecs < resolution)
		return 1; /* never round down to 0 */
	return usecs / resolution;
}
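/* Worked example (assuming a 5 us timer resolution; the actual value
 * used below is FALCON_IRQ_MOD_RESOLUTION): irq_mod_ticks(60, 5) == 12
 * ticks, while irq_mod_ticks(3, 5) == 1 so that a small non-zero
 * request is never silently rounded down to "no moderation". */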
/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
			     bool rx_adaptive)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned tx_ticks = irq_mod_ticks(tx_usecs, FALCON_IRQ_MOD_RESOLUTION);
	unsigned rx_ticks = irq_mod_ticks(rx_usecs, FALCON_IRQ_MOD_RESOLUTION);

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_ticks;

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_ticks;
}
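/* At probe time this is called with the module defaults
 * (tx_irq_mod_usec = 150, rx_irq_mod_usec = 60, adaptive RX enabled);
 * ethtool coalescing changes are expected to funnel through the same
 * tick conversion. */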
/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/
/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock))
		goto out_requeue;
	if (!efx->port_enabled)
		goto out_unlock;
	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
			(rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		falcon_sim_phy_event(efx);
	}
	efx->phy_op->poll(efx);
	efx->mac_op->poll(efx);

out_unlock:
	mutex_unlock(&efx->mac_lock);
out_requeue:
	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}
/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/
/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/
static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);
	}
	return 0;
}
static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		if (channel->napi_dev)
			netif_napi_del(&channel->napi_str);
		channel->napi_dev = NULL;
	}
}
/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/
/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	efx_start_all(efx);
	return 0;
}
/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
		efx_fini_channels(efx);
		efx_init_channels(efx);
	}

	return 0;
}
void efx_stats_disable(struct efx_nic *efx)
{
	spin_lock(&efx->stats_lock);
	++efx->stats_disable_count;
	spin_unlock(&efx->stats_lock);
}

void efx_stats_enable(struct efx_nic *efx)
{
	spin_lock(&efx->stats_lock);
	--efx->stats_disable_count;
	spin_unlock(&efx->stats_lock);
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them or if MAC stats fetches are temporarily
	 * disabled; slightly stale stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (!efx->stats_disable_count) {
		efx->mac_op->update_stats(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
		" resetting channels\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	efx_init_channels(efx);

	efx_start_all(efx);
	return rc;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
			new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
	bool changed = (efx->promiscuous != promiscuous);
	u32 crc;
	int bit;
	int i;

	efx->promiscuous = promiscuous;

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}

	if (!efx->port_enabled)
		/* Delay pushing settings until efx_start_port() */
		return;

	if (changed)
		queue_work(efx->workqueue, &efx->phy_work);

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}
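/* The hardware filters multicast frames through the hash table built
 * above: each address is reduced to the low-order bits of its
 * little-endian CRC32 and the matching bit enables delivery. Distinct
 * addresses can collide, so the filter may pass extra multicast
 * traffic, but it never drops a group that has been subscribed. */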
static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats		= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_multicast_list = efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
};
static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}
static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}
static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Clear MAC statistics */
	efx->mac_op->update_stats(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		EFX_ERR(efx, "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_locked:
	rtnl_unlock();
	EFX_ERR(efx, "could not register net dev\n");
	return rc;

fail_registered:
	unregister_netdev(net_dev);
	return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/
/* Tears down the entire software state and most of the hardware state
 * before reset */
void efx_reset_down(struct efx_nic *efx, enum reset_type method,
		    struct ethtool_cmd *ecmd)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stats_disable(efx);
	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);
	mutex_lock(&efx->spi_lock);

	efx->phy_op->get_settings(efx, ecmd);

	efx_fini_channels(efx);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
}
/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method,
		 struct ethtool_cmd *ecmd, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		ok = false;
	}

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		if (ok) {
			rc = efx->phy_op->init(efx);
			if (rc)
				ok = false;
		}
		if (!ok)
			efx->port_initialized = false;
	}

	if (ok) {
		efx_init_channels(efx);

		if (efx->phy_op->set_settings(efx, ecmd))
			EFX_ERR(efx, "could not restore PHY settings\n");
	}

	mutex_unlock(&efx->spi_lock);
	mutex_unlock(&efx->mac_lock);

	if (ok) {
		efx_start_all(efx);
		efx_stats_enable(efx);
	}
	return rc;
}
/* Reset the NIC as transparently as possible. Do not reset the PHY
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep.  You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc = 0;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto out_unlock;
	}

	EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));

	efx_reset_down(efx, method, &ecmd);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto out_disable;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		efx_reset_up(efx, method, &ecmd, false);
		rc = -EIO;
	} else {
		rc = efx_reset_up(efx, method, &ecmd, true);
	}

out_disable:
	if (rc) {
		EFX_ERR(efx, "has been disabled\n");
		efx->state = STATE_DISABLED;
		dev_close(efx->net_dev);
	} else {
		EFX_LOG(efx, "reset complete\n");
	}

out_unlock:
	rtnl_unlock();
	return rc;
}
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling %s reset for %s\n",
			RESET_TYPE(method), RESET_TYPE(type));
	else
		EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method));

	efx->reset_pending = method;

	queue_work(reset_workqueue, &efx->reset_work);
}
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/
/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}			/* end of list */
};
/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
}

static struct efx_mac_operations efx_dummy_mac_operations = {
	.reconfigure	= efx_port_dummy_op_void,
	.poll		= efx_port_dummy_op_void,
	.irq		= efx_port_dummy_op_void,
};

static struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_void,
	.poll		 = efx_port_dummy_op_void,
	.fini		 = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
};
/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/
/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	mutex_init(&efx->spi_lock);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	efx->rx_checksum_enabled = true;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	efx->stats_disable_count = 1;
	mutex_init(&efx->mac_lock);
	efx->mac_op = &efx_dummy_mac_operations;
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->phy_work, efx_phy_work);
	INIT_WORK(&efx->mac_work, efx_mac_work);
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->work_pending = false;
	}
	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
		tx_queue->tso_headers_free = NULL;
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = type;

	/* As close as we can get to guaranteeing that we don't overflow */
	BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		return -ENOMEM;

	return 0;
}
static void efx_fini_struct(struct efx_nic *efx)
{
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/
/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	falcon_fini_interrupt(efx);
	efx_fini_channels(efx);
	efx_fini_port(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}
/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net_device's have been removed. */
	cancel_work_sync(&efx->reset_work);

	efx_pci_remove_main(efx);

	efx_fini_io(efx);
	EFX_LOG(efx, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
}
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	rc = efx_init_napi(efx);
	if (rc)
		goto fail2;

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise port\n");
		goto fail4;
	}

	efx_init_channels(efx);

	rc = falcon_init_interrupt(efx);
	if (rc)
		goto fail5;

	return 0;

 fail5:
	efx_fini_channels(efx);
	efx_fini_port(efx);
 fail4:
 fail3:
	efx_fini_napi(efx);
 fail2:
	efx_remove_all(efx);
 fail1:
	return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int i, rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev(sizeof(*efx));
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_GRO);
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_TSO);
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	EFX_INFO(efx, "Solarflare Communications NIC detected\n");

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	/* No serialisation is required with the reset path because
	 * we're in STATE_INIT. */
	for (i = 0; i < 5; i++) {
		rc = efx_pci_probe_main(efx);

		/* Serialise against efx_reset(). No more resets will be
		 * scheduled since efx_stop_all() has been called, and we
		 * have not and never have been registered with either
		 * the rtnetlink or driverlink layers. */
		cancel_work_sync(&efx->reset_work);

		if (rc == 0) {
			if (efx->reset_pending != RESET_TYPE_NONE) {
				/* If there was a scheduled reset during
				 * probe, the NIC is probably hosed anyway */
				efx_pci_remove_main(efx);
				rc = -EIO;
			} else {
				break;
			}
		}

		/* Retry if a recoverable reset event has been scheduled */
		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
		    (efx->reset_pending != RESET_TYPE_ALL))
			goto fail3;

		efx->reset_pending = RESET_TYPE_NONE;
	}

	if (rc) {
		EFX_ERR(efx, "Could not reset NIC\n");
		goto fail4;
	}

	/* Switch to the running state before we expose the device to
	 * the OS.  This is to ensure that the initial gathering of
	 * MAC stats succeeds. */
	efx->state = STATE_RUNNING;

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail5;

	EFX_LOG(efx, "initialisation successful\n");

	rtnl_lock();
	efx_mtd_probe(efx); /* allowed to fail */
	rtnl_unlock();
	return 0;

 fail5:
	efx_pci_remove_main(efx);
 fail4:
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
static struct pci_driver efx_pci_driver = {
	.name		= EFX_DRIVER_NAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
};
/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/
module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	refill_workqueue = create_workqueue("sfc_refill");
	if (!refill_workqueue) {
		rc = -ENOMEM;
		goto err_refill;
	}
	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
	destroy_workqueue(refill_workqueue);
 err_refill:
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}
static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	destroy_workqueue(refill_workqueue);
	unregister_netdevice_notifier(&efx_netdev_notifier);
}
module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
	      "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);