/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/mii.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "mac.h"
#include "spi.h"
#include "falcon.h"
#include "falcon_hwdefs.h"
#include "falcon_io.h"
#include "mdio_10g.h"
#include "phy.h"
#include "boards.h"
#include "workarounds.h"
/* Falcon hardware control.
 * Falcon is the internal codename for the SFC4000 controller that is
 * present in SFE400X evaluation boards
 */
/**
 * struct falcon_nic_data - Falcon NIC state
 * @next_buffer_table: First available buffer table id
 * @pci_dev2: The secondary PCI device if present
 * @i2c_data: Operations and state for I2C bit-bashing algorithm
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
 */
struct falcon_nic_data {
	unsigned next_buffer_table;
	struct pci_dev *pci_dev2;
	struct i2c_algo_bit_data i2c_data;

	unsigned int_error_count;
	unsigned long int_error_expire;
};
/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

static int disable_dma_stats;
/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 0
#define TX_DC_BASE 0x130000

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 2
#define RX_DC_BASE 0x100000
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
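
/* Illustrative sketch, added for clarity and not part of the original
 * driver: the *_SIZE_LBN and *_ERASE_SIZE_LBN fields above hold log2 of
 * a byte count, e.g. 13 -> 8 KB for the AT25640 and 15 -> 32 KB erase
 * blocks for the AT25F1024.  A hypothetical decoder (names assumed, not
 * from this driver) would look like:
 *
 *	static inline unsigned int
 *	spi_log2_field(unsigned int type, unsigned int lbn, unsigned int width)
 *	{
 *		return 1U << ((type >> lbn) & ((1U << width) - 1));
 *	}
 */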
/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this
 * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
 * This also has an effect on RX/TX arbitration
 */
static int rx_xoff_thresh_bytes = -1;
module_param(rx_xoff_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this
 * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
 * This also has an effect on RX/TX arbitration
 */
static int rx_xon_thresh_bytes = -1;
module_param(rx_xon_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* TX descriptor ring size - min 512 max 4k */
#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
#define FALCON_TXD_RING_SIZE 1024
#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)

/* RX descriptor ring size - min 512 max 4k */
#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
#define FALCON_RXD_RING_SIZE 1024
#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)

/* Event queue size - max 32k */
#define FALCON_EVQ_ORDER EVQ_SIZE_4K
#define FALCON_EVQ_SIZE 4096
#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
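
/* Note, added for clarity: all of the ring sizes above are powers of
 * two, so the driver keeps free-running counters and masks them on use,
 * as in falcon_notify_tx_desc() below:
 *
 *	write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
 */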
/* If FALCON_MAX_INT_ERRORS internal errors occur within
 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define FALCON_INT_ERROR_EXPIRE 3600
#define FALCON_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define FALCON_FLUSH_INTERVAL 10
#define FALCON_FLUSH_POLL_COUNT 100
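
/* Note, added for clarity: falcon_flush_queues() below therefore waits
 * at most FALCON_FLUSH_POLL_COUNT * FALCON_FLUSH_INTERVAL
 * = 100 * 10 ms = 1 second for all queues to report completion.
 */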
/**************************************************************************
 *
 * Falcon constants
 *
 **************************************************************************
 */

/* DMA address mask */
#define FALCON_DMA_MASK DMA_BIT_MASK(46)

/* TX DMA length mask (13-bit) */
#define FALCON_TX_DMA_MASK (4096 - 1)

/* Size and alignment of special buffers (4KB) */
#define FALCON_BUF_SIZE 4096

/* Dummy SRAM size code */
#define SRM_NB_BSZ_ONCHIP_ONLY (-1)

#define FALCON_IS_DUAL_FUNC(efx)		\
	(falcon_rev(efx) < FALCON_REV_B0)
/**************************************************************************
 *
 * Falcon hardware access
 *
 **************************************************************************/

/* Read the current event from the event queue */
static inline efx_qword_t *falcon_event(struct efx_channel *channel,
					unsigned int index)
{
	return (((efx_qword_t *) (channel->eventq.addr)) + index);
}
/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int falcon_event_present(efx_qword_t *event)
{
	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}
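
/* Worked example, added for clarity: suppose the NIC has DMAed only the
 * low dword of a new event:
 *
 *	event->dword[0] == 0x12345678	(newly written half)
 *	event->dword[1] == 0xffffffff	(still the cleared pattern)
 *
 * A single 64-bit "!= all-ones" test would wrongly treat this torn
 * event as complete; the per-dword test above keeps answering "not
 * present" until both halves have been written.
 */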
/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************/
static void falcon_setsda(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
	EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state);
	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
}

static void falcon_setscl(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
	EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state);
	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
}

static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
	return EFX_OWORD_FIELD(reg, GPIO3_IN);
}

static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
	return EFX_OWORD_FIELD(reg, GPIO0_IN);
}
static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
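
/* For context, added comment: these operations are copied into per-NIC
 * state and registered with the i2c-bit-algo core in the probe path,
 * roughly as follows (a sketch, not the verbatim hook-up):
 *
 *	nic_data->i2c_data = falcon_i2c_bit_operations;
 *	nic_data->i2c_data.data = efx;
 *	efx->i2c_adap.algo_data = &nic_data->i2c_data;
 *	rc = i2c_bit_add_bus(&efx->i2c_adap);
 */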
/**************************************************************************
 *
 * Falcon special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a Falcon special buffer
 *
 * This will define a buffer (previously allocated via
 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
falcon_init_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
			index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_4(buf_desc,
				     IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
				     BUF_ADR_REGION, 0,
				     BUF_ADR_FBUF, (dma_addr >> 12),
				     BUF_OWNER_ID_FBUF, 0);
		falcon_write_sram(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer from Falcon and clears the buffer table entries */
static void
falcon_fini_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
		buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     BUF_UPD_CMD, 0,
			     BUF_CLR_CMD, 1,
			     BUF_CLR_END_ID, end,
			     BUF_CLR_START_ID, start);
	falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
}
/*
 * Allocate a new Falcon special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into Falcon's buffer table.
 *
 * This call will allocate 4KB buffers, since Falcon can't use 8KB
 * buffers for event queues and descriptor rings.
 */
static int falcon_alloc_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer,
				       unsigned int len)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	len = ALIGN(len, FALCON_BUF_SIZE);

	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / FALCON_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = nic_data->next_buffer_table;
	nic_data->next_buffer_table += buffer->entries;

	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
static void falcon_free_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, buffer->len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
			    buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}
/**************************************************************************
 *
 * Falcon generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

static int falcon_alloc_buffer(struct efx_nic *efx,
			       struct efx_buffer *buffer, unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}
/**************************************************************************
 *
 * Falcon TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
	EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
	falcon_writel_page(tx_queue->efx, &reg,
			   TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
}
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * notify the NIC via falcon_notify_tx_desc().
 */
void falcon_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = falcon_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_5(*txd,
				     TX_KER_PORT, 0,
				     TX_KER_CONT, buffer->continuation,
				     TX_KER_BYTE_CNT, buffer->len,
				     TX_KER_BUF_REGION, 0,
				     TX_KER_BUF_ADR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
	falcon_notify_tx_desc(tx_queue);
}
/* Allocate hardware resources for a TX queue */
int falcon_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
					   FALCON_TXD_RING_SIZE *
					   sizeof(efx_qword_t));
}
void falcon_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = false;

	/* Pin TX descriptor ring */
	falcon_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      TX_DESCQ_EN, 1,
			      TX_ISCSI_DDIG_EN, 0,
			      TX_ISCSI_HDIG_EN, 0,
			      TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      TX_DESCQ_EVQ_ID, tx_queue->channel->channel,
			      TX_DESCQ_OWNER_ID, 0,
			      TX_DESCQ_LABEL, tx_queue->queue,
			      TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
			      TX_DESCQ_TYPE, 0,
			      TX_NON_IP_DROP_DIS_B0, 1);

	if (falcon_rev(efx) >= FALCON_REV_B0) {
		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
	}

	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			   tx_queue->queue);

	if (falcon_rev(efx) < FALCON_REV_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);

		falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
	}
}
static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     TX_FLUSH_DESCQ_CMD, 1,
			     TX_FLUSH_DESCQ, tx_queue->queue);
	falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
}

void falcon_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(!tx_queue->flushed);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			   tx_queue->queue);

	/* Unpin TX descriptor ring */
	falcon_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void falcon_remove_tx(struct efx_tx_queue *tx_queue)
{
	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 * Falcon RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
}

/* This creates an entry in the RX descriptor queue */
static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
					unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = falcon_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     RX_KER_BUF_REGION, 0,
			     RX_KER_BUF_ADR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		falcon_build_rx_desc(rx_queue,
				     rx_queue->notified_count &
				     FALCON_RXD_RING_MASK);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
	EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
	falcon_writel_page(rx_queue->efx, &reg,
			   RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
}
int falcon_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
					   FALCON_RXD_RING_SIZE *
					   sizeof(efx_qword_t));
}
void falcon_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
	bool iscsi_digest_en = is_b0;

	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
		rx_queue->queue, rx_queue->rxd.index,
		rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = false;

	/* Pin RX descriptor ring */
	falcon_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      RX_DESCQ_EVQ_ID, rx_queue->channel->channel,
			      RX_DESCQ_OWNER_ID, 0,
			      RX_DESCQ_LABEL, rx_queue->queue,
			      RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
			      RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      RX_DESCQ_JUMBO, !is_b0,
			      RX_DESCQ_EN, 1);
	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			   rx_queue->queue);
}
static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     RX_FLUSH_DESCQ_CMD, 1,
			     RX_FLUSH_DESCQ, rx_queue->queue);
	falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
}

void falcon_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(!rx_queue->flushed);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			   rx_queue->queue);

	/* Unpin RX descriptor ring */
	falcon_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void falcon_remove_rx(struct efx_rx_queue *rx_queue)
{
	falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Falcon event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * be read" event.
 */
void falcon_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
	falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			    channel->channel);
}
/* Use HW to insert a SW defined event */
void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	EFX_POPULATE_OWORD_2(drv_ev_reg,
			     DRV_EV_QID, channel->channel,
			     DRV_EV_DATA,
			     EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
	falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
}
/* Handle a transmit completion event
 *
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void falcon_handle_tx_event(struct efx_channel *channel,
				   efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		channel->irq_mod_score +=
			(tx_ev_desc_ptr - tx_queue->read_count) &
			efx->type->txd_ring_mask;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT"\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				    const efx_qword_t *event,
				    bool *rx_ev_pkt_ok,
				    bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
			  0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}
	if (rx_ev_ip_frag_err)
		++rx_queue->channel->n_rx_ip_frag_err;

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}
/* Handle receive events that are not in-order. */
static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
				       unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
	dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
		   FALCON_RXD_RING_MASK);
	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
		 dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
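
/* Worked example, added for clarity: with FALCON_RXD_RING_SIZE == 1024,
 * index == 3 and expected == 1021 gives
 * (3 + 1024 - 1021) & 1023 == 6, i.e. the events for descriptors
 * 1021, 1022, 1023, 0, 1 and 2 were lost.
 */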
/* Handle a packet received event
 *
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void falcon_handle_rx_event(struct efx_channel *channel,
				   const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);

	rx_queue = &efx->rx_queue[channel->channel];

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IPv4 or
		 * UDP/IPv4, then we can rely on the hardware checksum.
		 */
		checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
	} else {
		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
					&discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match))
			discard = true;
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
/* Global events are basically PHY events */
static void falcon_handle_global_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
	    EFX_QWORD_FIELD(*event, XG_PHY_INTR) ||
	    EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) {
		efx->phy_op->clear_interrupt(efx);
		queue_work(efx->workqueue, &efx->phy_work);
		handled = true;
	}

	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
	    EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) {
		queue_work(efx->workqueue, &efx->mac_work);
		handled = true;
	}

	if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
		EFX_ERR(efx, "channel %d seen global RX_RESET "
			"event. Resetting.\n", channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		EFX_ERR(efx, "channel %d unknown global event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
}
static void falcon_handle_driver_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);

	switch (ev_sub_code) {
	case TX_DESCQ_FLS_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case RX_DESCQ_FLS_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case EVQ_INIT_DONE_EV_DECODE:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case SRM_UPD_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		break;
	case WAKE_UP_EV_DECODE:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		break;
	case TIMER_EV_DECODE:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		break;
	case RX_RECOVERY_EV_DECODE:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case RX_DSC_ERROR_EV_DECODE:
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case TX_DSC_ERROR_EV_DECODE:
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		break;
	}
}
int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rx_packets = 0;

	read_ptr = channel->eventq_read_ptr;

	do {
		p_event = falcon_event(channel, read_ptr);
		event = *p_event;

		if (!falcon_event_present(&event))
			/* End of events */
			break;

		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		ev_code = EFX_QWORD_FIELD(event, EV_CODE);

		switch (ev_code) {
		case RX_IP_EV_DECODE:
			falcon_handle_rx_event(channel, &event);
			++rx_packets;
			break;
		case TX_IP_EV_DECODE:
			falcon_handle_tx_event(channel, &event);
			break;
		case DRV_GEN_EV_DECODE:
			channel->eventq_magic
				= EFX_QWORD_FIELD(event, EVQ_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case GLOBAL_EV_DECODE:
			falcon_handle_global_event(channel, &event);
			break;
		case DRIVER_EV_DECODE:
			falcon_handle_driver_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;

	} while (rx_packets < rx_quota);

	channel->eventq_read_ptr = read_ptr;
	return rx_packets;
}
void falcon_set_int_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		/* Round to resolution supported by hardware.  The value we
		 * program is based at 0.  So actual interrupt moderation
		 * achieved is ((x + 1) * res).
		 */
		channel->irq_moderation -= (channel->irq_moderation %
					    FALCON_IRQ_MOD_RESOLUTION);
		if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION)
			channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION;
		EFX_POPULATE_DWORD_2(timer_cmd,
				     TIMER_MODE, TIMER_MODE_INT_HLDOFF,
				     TIMER_VAL,
				     channel->irq_moderation /
				     FALCON_IRQ_MOD_RESOLUTION - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     TIMER_MODE, TIMER_MODE_DIS,
				     TIMER_VAL, 0);
	}
	falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
				  channel->channel);
}
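
/* Worked example, added for clarity (assuming a hardware resolution of
 * 5 ticks): a requested irq_moderation of 23 is rounded down to 20,
 * TIMER_VAL is programmed as 20 / 5 - 1 == 3, and the hardware holds
 * off interrupts for (3 + 1) * 5 == 20 ticks, matching the
 * ((x + 1) * res) formula above.
 */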
/* Allocate buffer table entries for event queue */
int falcon_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned int evq_size;

	evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
	return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
}
void falcon_init_eventq(struct efx_channel *channel)
{
	efx_oword_t evq_ptr;
	struct efx_nic *efx = channel->efx;

	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
		channel->channel, channel->eventq.index,
		channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	falcon_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(evq_ptr,
			     EVQ_EN, 1,
			     EVQ_SIZE, FALCON_EVQ_ORDER,
			     EVQ_BUF_BASE_ID, channel->eventq.index);
	falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
			   channel->channel);

	falcon_set_int_moderation(channel);
}
void falcon_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t eventq_ptr;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(eventq_ptr);
	falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
			   channel->channel);

	/* Unpin event queue */
	falcon_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void falcon_remove_eventq(struct efx_channel *channel)
{
	falcon_free_special_buffer(channel->efx, &channel->eventq);
}
/* Generates a test event on the event queue.  A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic;
 */
void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
{
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event,
			     EV_CODE, DRV_GEN_EV_DECODE,
			     EVQ_MAGIC, magic);
	falcon_generate_event(channel, &test_event);
}

void falcon_sim_phy_event(struct efx_nic *efx)
{
	efx_qword_t phy_event;

	EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
	if (EFX_IS10G(efx))
		EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1);
	else
		EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1);

	falcon_generate_event(&efx->channel[0], &phy_event);
}
/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

static void falcon_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK;

	do {
		efx_qword_t *event = falcon_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!falcon_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
		if (ev_code == DRIVER_EV_DECODE &&
		    ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   DRIVER_EV_TX_DESCQ_ID);
			if (ev_queue < EFX_TX_QUEUE_COUNT) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = true;
			}
		} else if (ev_code == DRIVER_EV_DECODE &&
			   ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(*event,
						    DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_queues) {
				rx_queue = efx->rx_queue + ev_queue;

				/* retry the rx flush */
				if (ev_failed)
					falcon_flush_rx_queue(rx_queue);
				else
					rx_queue->flushed = true;
			}
		}

		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
	} while (read_ptr != end_ptr);
}
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int falcon_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i;
	bool outstanding;

	/* Issue flush requests */
	efx_for_each_tx_queue(tx_queue, efx) {
		tx_queue->flushed = false;
		falcon_flush_tx_queue(tx_queue);
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->flushed = false;
		falcon_flush_rx_queue(rx_queue);
	}

	/* Poll the evq looking for flush completions. Since we're not pushing
	 * any more rx or tx descriptors at this point, we're in no danger of
	 * overflowing the evq whilst we wait */
	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
		msleep(FALCON_FLUSH_INTERVAL);
		falcon_poll_flush_events(efx);

		/* Check if every queue has been successfully flushed */
		outstanding = false;
		efx_for_each_tx_queue(tx_queue, efx)
			outstanding |= !tx_queue->flushed;
		efx_for_each_rx_queue(rx_queue, efx)
			outstanding |= !rx_queue->flushed;
		if (!outstanding)
			return 0;
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway. "flushed" now
	 * indicates that we tried to flush. */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (!tx_queue->flushed)
			EFX_ERR(efx, "tx queue %d flush command timed out\n",
				tx_queue->queue);
		tx_queue->flushed = true;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (!rx_queue->flushed)
			EFX_ERR(efx, "rx queue %d flush command timed out\n",
				rx_queue->queue);
		rx_queue->flushed = true;
	}

	if (EFX_WORKAROUND_7803(efx))
		return 0;

	return -ETIMEDOUT;
}
/**************************************************************************
 *
 * Falcon hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/
/* Enable/disable/generate Falcon interrupts */
static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
				     int force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_2(int_en_reg_ker,
			     KER_INT_KER, force,
			     DRV_INT_EN_KER, enabled);
	falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
}
void falcon_enable_interrupts(struct efx_nic *efx)
{
	efx_oword_t int_adr_reg_ker;
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Program address */
	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
			     NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
			     INT_ADR_KER, efx->irq_status.dma_addr);
	falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);

	/* Enable interrupts */
	falcon_interrupts(efx, 1, 0);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void falcon_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	falcon_interrupts(efx, 0, 0);
}
/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void falcon_generate_interrupt(struct efx_nic *efx)
{
	falcon_interrupts(efx, 1, 1);
}

/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
	falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
	falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
	error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);

	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		EFX_OWORD_VAL(fatal_intr),
		error ? "disabling bus mastering" : "no recognised error");
	if (error == 0)
		goto out;

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
	if (mem_perr) {
		efx_oword_t reg;
		falcon_read(efx, &reg, MEM_STAT_REG_KER);
		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (FALCON_IS_DUAL_FUNC(efx))
		pci_clear_master(nic_data->pci_dev2);
	falcon_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (nic_data->int_error_count == 0 ||
	    time_after(jiffies, nic_data->int_error_expire)) {
		nic_data->int_error_count = 0;
		nic_data->int_error_expire =
			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
	}
	if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) {
		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen. "
			"NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}
out:
	return IRQ_HANDLED;
}
/* Handle a legacy interrupt from Falcon
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	falcon_readl(efx, &reg, INT_ISR0_B0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of any interrupting queues */
	efx_for_each_channel(channel, efx) {
		if ((queues & 1) ||
		    falcon_event_present(
			    falcon_event(channel, channel->eventq_read_ptr))) {
			efx_schedule_channel(channel);
			result = IRQ_HANDLED;
		}
		queues >>= 1;
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	struct efx_channel *channel;
	int syserr;
	u32 queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
			  raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule processing of any interrupting queues */
	channel = &efx->channel[0];
	while (queues) {
		if (queues & 0x01)
			efx_schedule_channel(channel);
		channel++;
		queues >>= 1;
	}

	return IRQ_HANDLED;
}
/* Handle an MSI interrupt from Falcon
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
static void falcon_setup_rss_indir_table(struct efx_nic *efx)
{
	int i = 0;
	unsigned long offset;
	efx_dword_t dword;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	for (offset = RX_RSS_INDIR_TBL_B0;
	     offset < RX_RSS_INDIR_TBL_B0 + 0x800;
	     offset += 0x10) {
		EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
				     i % efx->n_rx_queues);
		falcon_writel(efx, &dword, offset);
		i++;
	}
}
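
/* Note, added for clarity: the table is 0x800 bytes written in 0x10-byte
 * strides, i.e. 128 entries; writing i % efx->n_rx_queues spreads the
 * hash buckets round-robin across the active RX queues.
 */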
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int falcon_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (falcon_rev(efx) >= FALCON_REV_B0)
			handler = falcon_legacy_interrupt_b0;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
				efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, falcon_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 channel->name, channel);
		if (rc) {
			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
 fail1:
	return rc;
}
void falcon_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		falcon_read(efx, &reg, INT_ISR0_B0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;
	falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
	return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}
/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
int falcon_spi_cmd(const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	struct efx_nic *efx = spi->efx;
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;
	BUG_ON(!mutex_is_locked(&efx->spi_lock));

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
		falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     EE_SPI_HCMD_CMD_EN, 1,
			     EE_SPI_HCMD_SF_SEL, spi->device_id,
			     EE_SPI_HCMD_DABCNT, len,
			     EE_SPI_HCMD_READ, reading,
			     EE_SPI_HCMD_DUBCNT, 0,
			     EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     EE_SPI_HCMD_ENC, command);
	falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
		memcpy(out, &reg, len);
	}

	return 0;
}
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}
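
/* Worked example, added for clarity: with a 32-byte write block and
 * start == 0x3e, the limit is min(16, 32 - 30) == 2 bytes, so a buffered
 * write never crosses a block boundary; otherwise FALCON_SPI_MAX_LEN
 * (sizeof(efx_oword_t), i.e. 16 bytes) caps the transfer.
 */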
/* Fold the upper address bits into the command byte where the device
 * requires it (munge_address is a mask of those bits). */
static u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}
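
/* Worked example, added for clarity: devices whose ninth address bit is
 * carried in the command byte set munge_address, so for address 0x123
 * the bit ((0x123 >> 8) & 1) is folded into bit 3 of the command, a
 * convention used by some small SPI EEPROMs.
 */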
/* Wait up to 10 ms for buffered write completion */
int falcon_spi_wait_write(const struct efx_spi_device *spi)
{
	struct efx_nic *efx = spi->efx;
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "SPI write timeout on device %d"
				" last status=0x%02x\n",
				spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
		    size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
		     size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(spi);
		if (rc)
			break;

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */

static int falcon_reset_macs(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	if (falcon_rev(efx) < FALCON_REV_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		if (!EFX_IS10G(efx)) {
			EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1);
			falcon_write(efx, &reg, GM_CFG1_REG);
			udelay(1000);

			EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0);
			falcon_write(efx, &reg, GM_CFG1_REG);
			udelay(1000);
			return 0;
		} else {
			EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
			falcon_write(efx, &reg, XM_GLB_CFG_REG);

			for (count = 0; count < 10000; count++) {
				falcon_read(efx, &reg, XM_GLB_CFG_REG);
				if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
					return 0;
				udelay(10);
			}

			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
			return -ETIMEDOUT;
		}
	}

	/* MAC stats will fail whilst the TX fifo is draining. Serialise
	 * the drain sequence with the statistics fetch */
	efx_stats_disable(efx);

	falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
	EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);

	falcon_read(efx, &reg, GLB_CTL_REG_KER);
	EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, RST_EM, 1);
	falcon_write(efx, &reg, GLB_CTL_REG_KER);

	count = 0;
	while (1) {
		falcon_read(efx, &reg, GLB_CTL_REG_KER);
		if (!EFX_OWORD_FIELD(reg, RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, RST_EM)) {
			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
				count);
			break;
		}
		if (count > 20) {
			EFX_ERR(efx, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	efx_stats_enable(efx);

	/* If we've reset the EM block and the link is up, then
	 * we'll have to kick the XAUI link so the PHY can recover */
	if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
		falcon_reset_xaui(efx);

	return 0;
}
void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

	if ((falcon_rev(efx) < FALCON_REV_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0))
		return;

	falcon_reset_macs(efx);
}
void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	/* Isolate the MAC -> RX */
	falcon_read(efx, &reg, RX_CFG_REG_KER);
	EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0);
	falcon_write(efx, &reg, RX_CFG_REG_KER);

	if (!efx->link_up)
		falcon_drain_tx_fifo(efx);
}
void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;
	int link_speed;
	bool tx_fc;

	switch (efx->link_speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     MAC_XOFF_VAL, 0xffff /* max pause time */,
			     MAC_BCAD_ACPT, 1,
			     MAC_UC_PROM, efx->promiscuous,
			     MAC_LINK_STATUS, 1, /* always set */
			     MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
				    !efx->link_up);
	}

	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);

	/* Restore the multicast hash registers. */
	falcon_set_multicast_hash(efx);

	/* Transmission of pause frames when RX crosses the threshold is
	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
	 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
	tx_fc = !!(efx->link_fc & EFX_FC_TX);
	falcon_read(efx, &reg, RX_CFG_REG_KER);
	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);

	/* Unisolate the MAC -> RX */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
	falcon_write(efx, &reg, RX_CFG_REG_KER);
}
int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
{
	efx_oword_t reg;
	u32 *dma_done;
	int i;

	if (disable_dma_stats)
		return 0;

	/* Statistics fetch will fail if the MAC is in TX drain */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		efx_oword_t temp;
		falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
		if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
			return 0;
	}

	dma_done = (efx->stats_buffer.addr + done_offset);
	*dma_done = FALCON_STATS_NOT_DONE;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     MAC_STAT_DMA_CMD, 1,
			     MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER);

	/* Wait for transfer to complete */
	for (i = 0; i < 400; i++) {
		if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
			rmb(); /* Ensure the stats are valid. */
			return 0;
		}
		udelay(10);
	}

	EFX_ERR(efx, "timed out waiting for statistics\n");
	return -ETIMEDOUT;
}
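
/* Note, added for clarity: the completion loop above polls up to 400
 * times with udelay(10), i.e. it allows roughly 4 ms for the hardware
 * to finish the stats DMA before giving up.
 */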
/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */
/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct efx_nic *efx)
{
	efx_dword_t md_stat;
	int count;

	/* wait up to 50 ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
		if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
			if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
			    EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) {
				EFX_ERR(efx, "error from GMII access "
					EFX_DWORD_FMT"\n",
					EFX_DWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	EFX_ERR(efx, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}
2090 /* Write an MDIO register of a PHY connected to Falcon. */
2091 static int falcon_mdio_write(struct net_device *net_dev,
2092 int prtad, int devad, u16 addr, u16 value)
2094 struct efx_nic *efx = netdev_priv(net_dev);
2098 EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
2099 prtad, devad, addr, value);
2101 spin_lock_bh(&efx->phy_lock);
2103 /* Check MDIO not currently being accessed */
2104 rc = falcon_gmii_wait(efx);
2108 /* Write the address/ID register */
2109 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
2110 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
2112 EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
2113 falcon_write(efx, &reg, MD_ID_REG_KER);
2116 EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
2117 falcon_write(efx, &reg, MD_TXD_REG_KER);
2119 EFX_POPULATE_OWORD_2(reg,
2122 falcon_write(efx, &reg, MD_CS_REG_KER);
2124 /* Wait for data to be written */
2125 rc = falcon_gmii_wait(efx);
2127 /* Abort the write operation */
2128 EFX_POPULATE_OWORD_2(reg,
2131 falcon_write(efx, &reg, MD_CS_REG_KER);
2136 spin_unlock_bh(&efx->phy_lock);
2140 /* Read an MDIO register of a PHY connected to Falcon. */
2141 static int falcon_mdio_read(struct net_device *net_dev,
2142 int prtad, int devad, u16 addr)
2144 struct efx_nic *efx = netdev_priv(net_dev);
2148 spin_lock_bh(&efx->phy_lock);
2150 /* Check MDIO not currently being accessed */
2151 rc = falcon_gmii_wait(efx);
2155 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
2156 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
2158 EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
2159 falcon_write(efx, &reg, MD_ID_REG_KER);
2161 /* Request data to be read */
2162 EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0);
2163 falcon_write(efx, &reg, MD_CS_REG_KER);
2165 /* Wait for data to become available */
2166 rc = falcon_gmii_wait(efx);
2168 falcon_read(efx, &reg, MD_RXD_REG_KER);
2169 rc = EFX_OWORD_FIELD(reg, MD_RXD);
2170 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
2171 prtad, devad, addr, rc);
2173 /* Abort the read operation */
2174 EFX_POPULATE_OWORD_2(reg,
2177 falcon_write(efx, &reg, MD_CS_REG_KER);
2179 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
2180 prtad, devad, addr, rc);
2184 spin_unlock_bh(&efx->phy_lock);
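/* Illustrative usage sketch, not driver code: once falcon_probe_port()
 * (below) installs the two functions above in efx->mdio, generic
 * clause-45 code can reach the PHY through those hooks; e.g. reading
 * the PHY-XS status register (register constants from <linux/mdio.h>,
 * the wrapper itself is hypothetical):
 */
static int example_read_phyxs_stat1(struct efx_nic *efx)
{
	return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad,
				   MDIO_MMD_PHYXS, MDIO_STAT1);
}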
2188 static int falcon_probe_phy(struct efx_nic *efx)
2190 switch (efx->phy_type) {
2191 case PHY_TYPE_SFX7101:
2192 efx->phy_op = &falcon_sfx7101_phy_ops;
2194 case PHY_TYPE_SFT9001A:
2195 case PHY_TYPE_SFT9001B:
2196 efx->phy_op = &falcon_sft9001_phy_ops;
2198 case PHY_TYPE_QT2022C2:
2199 case PHY_TYPE_QT2025C:
2200 efx->phy_op = &falcon_xfp_phy_ops;
2203 EFX_ERR(efx, "Unknown PHY type %d\n",
2208 if (efx->phy_op->macs & EFX_XMAC)
2209 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2210 (1 << LOOPBACK_XGXS) |
2211 (1 << LOOPBACK_XAUI));
2212 if (efx->phy_op->macs & EFX_GMAC)
2213 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2214 efx->loopback_modes |= efx->phy_op->loopbacks;
2219 int falcon_switch_mac(struct efx_nic *efx)
2221 struct efx_mac_operations *old_mac_op = efx->mac_op;
2222 efx_oword_t nic_stat;
2226 /* Don't try to fetch MAC stats while we're switching MACs */
2227 efx_stats_disable(efx);
2229 /* Internal loopbacks override the phy speed setting */
2230 if (efx->loopback_mode == LOOPBACK_GMAC) {
2231 efx->link_speed = 1000;
2232 efx->link_fd = true;
2233 } else if (LOOPBACK_INTERNAL(efx)) {
2234 efx->link_speed = 10000;
2235 efx->link_fd = true;
2238 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2239 efx->mac_op = (EFX_IS10G(efx) ?
2240 &falcon_xmac_operations : &falcon_gmac_operations);
2242 /* Always push the NIC_STAT_REG setting even if the mac hasn't
2243 * changed, because this function is run post online reset */
2244 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2245 strap_val = EFX_IS10G(efx) ? 5 : 3;
2246 if (falcon_rev(efx) >= FALCON_REV_B0) {
2247 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1);
2248 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val);
2249 falcon_write(efx, &nic_stat, NIC_STAT_REG);
2251 /* Falcon A1 does not support 1G/10G speed switching
2252 * and must not be used with a PHY that does. */
2253 BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
2256 if (old_mac_op == efx->mac_op)
2259 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
2260 /* Not all macs support a mac-level link state */
2263 rc = falcon_reset_macs(efx);
2265 efx_stats_enable(efx);
2269 /* This call is responsible for hooking in the MAC and PHY operations */
2270 int falcon_probe_port(struct efx_nic *efx)
2274 /* Hook in PHY operations table */
2275 rc = falcon_probe_phy(efx);
2279 /* Set up MDIO structure for PHY */
2280 efx->mdio.mmds = efx->phy_op->mmds;
2281 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
2282 efx->mdio.mdio_read = falcon_mdio_read;
2283 efx->mdio.mdio_write = falcon_mdio_write;
2285 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2286 if (falcon_rev(efx) >= FALCON_REV_B0)
2287 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
2289 efx->wanted_fc = EFX_FC_RX;
2291 /* Allocate buffer for stats */
2292 rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
2293 FALCON_MAC_STATS_SIZE);
2296 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
2297 (u64)efx->stats_buffer.dma_addr,
2298 efx->stats_buffer.addr,
2299 (u64)virt_to_phys(efx->stats_buffer.addr));
2304 void falcon_remove_port(struct efx_nic *efx)
2306 falcon_free_buffer(efx, &efx->stats_buffer);
2309 /**************************************************************************
2311 * Multicast filtering
2313 **************************************************************************
2316 void falcon_set_multicast_hash(struct efx_nic *efx)
2318 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2320 /* Broadcast packets go through the multicast hash filter.
2321 * ether_crc_le() of the broadcast address is 0xbe2612ff
2322 * so we always add bit 0xff to the mask.
2324 set_bit_le(0xff, mc_hash->byte);
2326 falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
2327 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
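/* Illustrative sketch, not driver code: how a bucket in the 256-bit
 * multicast hash is chosen. The low 8 bits of the little-endian CRC of
 * the address select the bit, which is why the broadcast address
 * (CRC 0xbe2612ff) always lands on bit 0xff. Helper name is
 * hypothetical. */
static void example_hash_mc_addr(union efx_multicast_hash *mc_hash,
				 const u8 *addr)
{
	unsigned int bit = ether_crc_le(ETH_ALEN, addr) & 0xff;

	set_bit_le(bit, mc_hash->byte);
}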
2331 /**************************************************************************
2335 **************************************************************************/
2337 int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2339 struct falcon_nvconfig *nvconfig;
2340 struct efx_spi_device *spi;
2342 int rc, magic_num, struct_ver;
2343 __le16 *word, *limit;
2346 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
2350 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
2353 nvconfig = region + NVCONFIG_OFFSET;
2355 mutex_lock(&efx->spi_lock);
2356 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
2357 mutex_unlock(&efx->spi_lock);
2359 EFX_ERR(efx, "Failed to read %s\n",
2360 efx->spi_flash ? "flash" : "EEPROM");
2365 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2366 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2369 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) {
2370 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2373 if (struct_ver < 2) {
2374 EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
2376 } else if (struct_ver < 4) {
2377 word = &nvconfig->board_magic_num;
2378 limit = (__le16 *) (nvconfig + 1);
2381 limit = region + FALCON_NVCONFIG_END;
2383 for (csum = 0; word < limit; ++word)
2384 csum += le16_to_cpu(*word);
2386 if (~csum & 0xffff) {
2387 EFX_ERR(efx, "NVRAM has incorrect checksum\n");
2393 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
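/* Illustrative sketch, not driver code: the checksum convention implied
 * by the verification above. All 16-bit words of the region, including
 * the checksum word itself, must sum to 0xffff modulo 2^16, so a writer
 * would store the value below, computed over every word except the
 * checksum field (helper name and signature are assumptions): */
static __le16 example_nvram_csum(const __le16 *word, const __le16 *limit)
{
	u16 sum = 0;

	for (; word < limit; ++word)
		sum += le16_to_cpu(*word);
	/* chosen so that adding it brings the total to 0xffff */
	return cpu_to_le16(0xffff - sum);
}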
2400 /* Registers tested in the falcon register test */
2404 } efx_test_registers[] = {
2405 { ADR_REGION_REG_KER,
2406 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2408 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
2410 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
2412 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
2413 { MAC0_CTRL_REG_KER,
2414 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
2415 { SRM_TX_DC_CFG_REG_KER,
2416 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
2417 { RX_DC_CFG_REG_KER,
2418 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
2419 { RX_DC_PF_WM_REG_KER,
2420 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2422 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2424 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
2426 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
2428 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2430 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
2432 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
2434 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
2436 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
2438 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
2440 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
2443 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
2444 const efx_oword_t *mask)
2446 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
2447 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
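/* A worked example of the masked compare above: with mask 0x0f, the
 * values 0x12 and 0x1d differ inside the mask (0x12 ^ 0x1d = 0x0f) and
 * compare as different, while 0x12 and 0x32 differ only outside it
 * (0x12 ^ 0x32 = 0x20) and compare as equal. The register test below
 * relies on this to ignore bits it cannot legitimately drive. */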
2450 int falcon_test_registers(struct efx_nic *efx)
2452 unsigned address = 0, i, j;
2453 efx_oword_t mask, imask, original, reg, buf;
2455 /* Falcon should be in loopback to isolate the XMAC from the PHY */
2456 WARN_ON(!LOOPBACK_INTERNAL(efx));
2458 for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
2459 address = efx_test_registers[i].address;
2460 mask = imask = efx_test_registers[i].mask;
2461 EFX_INVERT_OWORD(imask);
2463 falcon_read(efx, &original, address);
2465 /* bit sweep on and off */
2466 for (j = 0; j < 128; j++) {
2467 if (!EFX_EXTRACT_OWORD32(mask, j, j))
2470 /* Test this testable bit can be set in isolation */
2471 EFX_AND_OWORD(reg, original, mask);
2472 EFX_SET_OWORD32(reg, j, j, 1);
2474 falcon_write(efx, &reg, address);
2475 falcon_read(efx, &buf, address);
2477 if (efx_masked_compare_oword(&reg, &buf, &mask))
2480 /* Test this testable bit can be cleared in isolation */
2481 EFX_OR_OWORD(reg, original, mask);
2482 EFX_SET_OWORD32(reg, j, j, 0);
2484 falcon_write(efx, &reg, address);
2485 falcon_read(efx, &buf, address);
2487 if (efx_masked_compare_oword(&reg, &buf, &mask))
2491 falcon_write(efx, &original, address);
2497 EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
2498 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
2499 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
2503 /**************************************************************************
2507 **************************************************************************
2510 /* Resets NIC to known state. This routine must be called in process
2511 * context and is allowed to sleep. */
2512 int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2514 struct falcon_nic_data *nic_data = efx->nic_data;
2515 efx_oword_t glb_ctl_reg_ker;
2518 EFX_LOG(efx, "performing hardware reset (%d)\n", method);
2520 /* Initiate device reset */
2521 if (method == RESET_TYPE_WORLD) {
2522 rc = pci_save_state(efx->pci_dev);
2524 EFX_ERR(efx, "failed to backup PCI state of primary "
2525 "function prior to hardware reset\n");
2528 if (FALCON_IS_DUAL_FUNC(efx)) {
2529 rc = pci_save_state(nic_data->pci_dev2);
2531 EFX_ERR(efx, "failed to backup PCI state of "
2532 "secondary function prior to "
2533 "hardware reset\n");
2538 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2539 EXT_PHY_RST_DUR, 0x7,
2542 int reset_phy = (method == RESET_TYPE_INVISIBLE ?
2543 EXCLUDE_FROM_RESET : 0);
2545 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2546 EXT_PHY_RST_CTL, reset_phy,
2547 PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
2548 PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
2549 PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
2550 EE_RST_CTL, EXCLUDE_FROM_RESET,
2551 EXT_PHY_RST_DUR, 0x7 /* 10ms */,
2554 falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
2556 EFX_LOG(efx, "waiting for hardware reset\n");
2557 schedule_timeout_uninterruptible(HZ / 20);
2559 /* Restore PCI configuration if needed */
2560 if (method == RESET_TYPE_WORLD) {
2561 if (FALCON_IS_DUAL_FUNC(efx)) {
2562 rc = pci_restore_state(nic_data->pci_dev2);
2564 EFX_ERR(efx, "failed to restore PCI config for "
2565 "the secondary function\n");
2569 rc = pci_restore_state(efx->pci_dev);
2571 EFX_ERR(efx, "failed to restore PCI config for the "
2572 "primary function\n");
2575 EFX_LOG(efx, "successfully restored PCI config\n");
2578 /* Assert that the reset has completed */
2579 falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
2580 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
2582 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2585 EFX_LOG(efx, "hardware reset complete\n");
2589 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
2592 pci_restore_state(efx->pci_dev);
2599 /* Zeroes out the SRAM contents. This routine must be called in
2600 * process context and is allowed to sleep.
2602 static int falcon_reset_sram(struct efx_nic *efx)
2604 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2607 /* Set the SRAM wake/sleep GPIO appropriately. */
2608 falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
2609 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
2610 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1);
2611 falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
2613 /* Initiate SRAM reset */
2614 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2615 SRAM_OOB_BT_INIT_EN, 1,
2616 SRM_NUM_BANKS_AND_BANK_SIZE, 0);
2617 falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
2619 /* Wait for SRAM reset to complete */
2622 EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
2624 /* SRAM reset is slow; expect around 16ms */
2625 schedule_timeout_uninterruptible(HZ / 50);
2627 /* Check for reset complete */
2628 falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
2629 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
2630 EFX_LOG(efx, "SRAM reset complete\n");
2634 } while (++count < 20); /* wait up to 0.4 sec */
2636 EFX_ERR(efx, "timed out waiting for SRAM reset\n");
2640 static int falcon_spi_device_init(struct efx_nic *efx,
2641 struct efx_spi_device **spi_device_ret,
2642 unsigned int device_id, u32 device_type)
2644 struct efx_spi_device *spi_device;
2646 if (device_type != 0) {
2647 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
2650 spi_device->device_id = device_id;
2652 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2653 spi_device->addr_len =
2654 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2655 spi_device->munge_address = (spi_device->size == 1 << 9 &&
2656 spi_device->addr_len == 1);
2657 spi_device->erase_command =
2658 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
2659 spi_device->erase_size =
2660 1 << SPI_DEV_TYPE_FIELD(device_type,
2661 SPI_DEV_TYPE_ERASE_SIZE);
2662 spi_device->block_size =
2663 1 << SPI_DEV_TYPE_FIELD(device_type,
2664 SPI_DEV_TYPE_BLOCK_SIZE);
2666 spi_device->efx = efx;
2671 kfree(*spi_device_ret);
2672 *spi_device_ret = spi_device;
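/* Illustrative sketch, not driver code: the type-word layout decoded
 * above. A hypothetical 64 KB part with two address bytes and 64-byte
 * write blocks would be described as follows, and
 * falcon_spi_device_init() would decode it to size = 1 << 16,
 * addr_len = 2 and block_size = 1 << 6: */
static const u32 example_spi_device_type =
	(16 << SPI_DEV_TYPE_SIZE_LBN) |		/* size = 1 << 16 bytes */
	(2 << SPI_DEV_TYPE_ADDR_LEN_LBN) |	/* two address bytes */
	(6 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);	/* write block = 64 bytes */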
2677 static void falcon_remove_spi_devices(struct efx_nic *efx)
2679 kfree(efx->spi_eeprom);
2680 efx->spi_eeprom = NULL;
2681 kfree(efx->spi_flash);
2682 efx->spi_flash = NULL;
2685 /* Extract non-volatile configuration */
2686 static int falcon_probe_nvconfig(struct efx_nic *efx)
2688 struct falcon_nvconfig *nvconfig;
2692 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
2696 rc = falcon_read_nvram(efx, nvconfig);
2697 if (rc == -EINVAL) {
2698 EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
2699 efx->phy_type = PHY_TYPE_NONE;
2700 efx->mdio.prtad = MDIO_PRTAD_NONE;
2706 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
2707 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
2709 efx->phy_type = v2->port0_phy_type;
2710 efx->mdio.prtad = v2->port0_phy_addr;
2711 board_rev = le16_to_cpu(v2->board_revision);
2713 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2714 __le32 fl = v3->spi_device_type[EE_SPI_FLASH];
2715 __le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
2716 rc = falcon_spi_device_init(efx, &efx->spi_flash,
2721 rc = falcon_spi_device_init(efx, &efx->spi_eeprom,
2729 /* Read the MAC addresses */
2730 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2732 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
2734 falcon_probe_board(efx, board_rev);
2740 falcon_remove_spi_devices(efx);
2746 /* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
2747 * count, port speed). Set workaround and feature flags accordingly.
2749 static int falcon_probe_nic_variant(struct efx_nic *efx)
2751 efx_oword_t altera_build;
2752 efx_oword_t nic_stat;
2754 falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
2755 if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
2756 EFX_ERR(efx, "Falcon FPGA not supported\n");
2760 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2762 switch (falcon_rev(efx)) {
2765 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2769 if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
2770 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2779 EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
2783 /* Initial assumed speed */
2784 efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000;
2789 /* Probe all SPI devices on the NIC */
2790 static void falcon_probe_spi_devices(struct efx_nic *efx)
2792 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2795 falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
2796 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2797 falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
2799 if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) {
2800 boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ?
2801 EE_SPI_FLASH : EE_SPI_EEPROM);
2802 EFX_LOG(efx, "Booted from %s\n",
2803 boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM");
2805 /* Disable VPD and set clock dividers to safe
2806 * values for initial programming. */
2808 EFX_LOG(efx, "Booted from internal ASIC settings;"
2809 " setting SPI config\n");
2810 EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
2811 /* 125 MHz / 7 ~= 18 MHz */
2813 /* 125 MHz / 63 ~= 2 MHz */
2814 EE_EE_CLOCK_DIV, 63);
2815 falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
2818 if (boot_dev == EE_SPI_FLASH)
2819 falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH,
2820 default_flash_type);
2821 if (boot_dev == EE_SPI_EEPROM)
2822 falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM,
2826 int falcon_probe_nic(struct efx_nic *efx)
2828 struct falcon_nic_data *nic_data;
2831 /* Allocate storage for hardware specific data */
2832 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2835 efx->nic_data = nic_data;
2837 /* Determine number of ports etc. */
2838 rc = falcon_probe_nic_variant(efx);
2842 /* Probe secondary function if expected */
2843 if (FALCON_IS_DUAL_FUNC(efx)) {
2844 struct pci_dev *dev = pci_dev_get(efx->pci_dev);
2846 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
2848 if (dev->bus == efx->pci_dev->bus &&
2849 dev->devfn == efx->pci_dev->devfn + 1) {
2850 nic_data->pci_dev2 = dev;
2854 if (!nic_data->pci_dev2) {
2855 EFX_ERR(efx, "failed to find secondary function\n");
2861 /* Now we can reset the NIC */
2862 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
2864 EFX_ERR(efx, "failed to reset NIC\n");
2868 /* Allocate memory for INT_KER */
2869 rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
2872 BUG_ON(efx->irq_status.dma_addr & 0x0f);
2874 EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
2875 (u64)efx->irq_status.dma_addr,
2876 efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
2878 falcon_probe_spi_devices(efx);
2880 /* Read in the non-volatile configuration */
2881 rc = falcon_probe_nvconfig(efx);
2885 /* Initialise I2C adapter */
2886 efx->i2c_adap.owner = THIS_MODULE;
2887 nic_data->i2c_data = falcon_i2c_bit_operations;
2888 nic_data->i2c_data.data = efx;
2889 efx->i2c_adap.algo_data = &nic_data->i2c_data;
2890 efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
2891 strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
2892 rc = i2c_bit_add_bus(&efx->i2c_adap);
2899 falcon_remove_spi_devices(efx);
2900 falcon_free_buffer(efx, &efx->irq_status);
2903 if (nic_data->pci_dev2) {
2904 pci_dev_put(nic_data->pci_dev2);
2905 nic_data->pci_dev2 = NULL;
2909 kfree(efx->nic_data);
2913 /* This call performs hardware-specific global initialisation, such as
2914 * defining the descriptor cache sizes and number of RSS channels.
2915 * It does not set up any buffers, descriptor rings or event queues.
2917 int falcon_init_nic(struct efx_nic *efx)
2923 /* Use on-chip SRAM */
2924 falcon_read(efx, &temp, NIC_STAT_REG);
2925 EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
2926 falcon_write(efx, &temp, NIC_STAT_REG);
2928 /* Set the source of the GMAC clock */
2929 if (falcon_rev(efx) == FALCON_REV_B0) {
2930 falcon_read(efx, &temp, GPIO_CTL_REG_KER);
2931 EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true);
2932 falcon_write(efx, &temp, GPIO_CTL_REG_KER);
2935 rc = falcon_reset_sram(efx);
2939 /* Set positions of descriptor caches in SRAM. */
2940 EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
2941 falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
2942 EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
2943 falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);
2945 /* Set TX descriptor cache size. */
2946 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
2947 EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
2948 falcon_write(efx, &temp, TX_DC_CFG_REG_KER);
2950 /* Set RX descriptor cache size. Set low watermark to size-8, as
2951 * this allows most efficient prefetching.
2953 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
2954 EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
2955 falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
2956 EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
2957 falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);
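/* With RX_DC_ENTRIES == 64 the low watermark written above is
 * 64 - 8 = 56, i.e. the hardware starts refilling the descriptor
 * cache as soon as 8 entries have been consumed. */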
2959 /* Clear the parity enables on the TX data fifos as
2960 * they produce false parity errors because of timing issues
2962 if (EFX_WORKAROUND_5129(efx)) {
2963 falcon_read(efx, &temp, SPARE_REG_KER);
2964 EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
2965 falcon_write(efx, &temp, SPARE_REG_KER);
2968 /* Enable all the genuinely fatal interrupts. (They are still
2969 * masked by the overall interrupt mask, controlled by
2970 * falcon_interrupts()).
2972 * Note: All other fatal interrupts are enabled
2974 EFX_POPULATE_OWORD_3(temp,
2975 ILL_ADR_INT_KER_EN, 1,
2976 RBUF_OWN_INT_KER_EN, 1,
2977 TBUF_OWN_INT_KER_EN, 1);
2978 EFX_INVERT_OWORD(temp);
2979 falcon_write(efx, &temp, FATAL_INTR_REG_KER);
2981 if (EFX_WORKAROUND_7244(efx)) {
2982 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
2983 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
2984 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
2985 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
2986 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
2987 falcon_write(efx, &temp, RX_FILTER_CTL_REG);
2990 falcon_setup_rss_indir_table(efx);
2992 /* Set up RX. The 'wait for descriptor' mode is broken and must
2993 * be disabled. RXDP recovery shouldn't be needed, but is.
2995 falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
2996 EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
2997 EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
2998 if (EFX_WORKAROUND_5583(efx))
2999 EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
3000 falcon_write(efx, &temp, RX_SELF_RST_REG_KER);
3002 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3003 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3005 falcon_read(efx, &temp, TX_CFG2_REG_KER);
3006 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
3007 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
3008 EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
3009 EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
3010 EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
3011 /* Enable SW_EV to inherit in char driver - assume harmless here */
3012 EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
3013 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
3014 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
3015 /* Squash TX of packets of 16 bytes or less */
3016 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
3017 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
3018 falcon_write(efx, &temp, TX_CFG2_REG_KER);
3020 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
3021 * descriptors (which is bad).
3023 falcon_read(efx, &temp, TX_CFG_REG_KER);
3024 EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
3025 falcon_write(efx, &temp, TX_CFG_REG_KER);
3028 falcon_read(efx, &temp, RX_CFG_REG_KER);
3029 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
3030 if (EFX_WORKAROUND_7575(efx))
3031 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
3033 if (falcon_rev(efx) >= FALCON_REV_B0)
3034 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
3036 /* RX FIFO flow control thresholds */
3037 thresh = ((rx_xon_thresh_bytes >= 0) ?
3038 rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
3039 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
3040 thresh = ((rx_xoff_thresh_bytes >= 0) ?
3041 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
3042 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
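/* The MAC thresholds above are programmed in 256-byte units; e.g. the
 * B0 defaults of 27648 (XON) and 54272 (XOFF) bytes, listed in
 * falcon_b_nic_type below, become register values 108 and 212. */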
3043 /* RX control FIFO thresholds [32 entries] */
3044 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
3045 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
3046 falcon_write(efx, &temp, RX_CFG_REG_KER);
3048 /* Set destination of both TX and RX Flush events */
3049 if (falcon_rev(efx) >= FALCON_REV_B0) {
3050 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
3051 falcon_write(efx, &temp, DP_CTRL_REG);
3057 void falcon_remove_nic(struct efx_nic *efx)
3059 struct falcon_nic_data *nic_data = efx->nic_data;
3062 /* Remove I2C adapter and clear it in preparation for a retry */
3063 rc = i2c_del_adapter(&efx->i2c_adap);
3065 memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap));
3067 falcon_remove_spi_devices(efx);
3068 falcon_free_buffer(efx, &efx->irq_status);
3070 falcon_reset_hw(efx, RESET_TYPE_ALL);
3072 /* Release the second function after the reset */
3073 if (nic_data->pci_dev2) {
3074 pci_dev_put(nic_data->pci_dev2);
3075 nic_data->pci_dev2 = NULL;
3078 /* Tear down the private nic state */
3079 kfree(efx->nic_data);
3080 efx->nic_data = NULL;
3083 void falcon_update_nic_stats(struct efx_nic *efx)
3087 falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER);
3088 efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT);
3091 /**************************************************************************
3093 * Revision-dependent attributes used by efx.c
3095 **************************************************************************
3098 struct efx_nic_type falcon_a_nic_type = {
3100 .mem_map_size = 0x20000,
3101 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
3102 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
3103 .buf_tbl_base = BUF_TBL_KER_A1,
3104 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
3105 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
3106 .txd_ring_mask = FALCON_TXD_RING_MASK,
3107 .rxd_ring_mask = FALCON_RXD_RING_MASK,
3108 .evq_size = FALCON_EVQ_SIZE,
3109 .max_dma_mask = FALCON_DMA_MASK,
3110 .tx_dma_mask = FALCON_TX_DMA_MASK,
3111 .bug5391_mask = 0xf,
3112 .rx_xoff_thresh = 2048,
3113 .rx_xon_thresh = 512,
3114 .rx_buffer_padding = 0x24,
3115 .max_interrupt_mode = EFX_INT_MODE_MSI,
3116 .phys_addr_channels = 4,
3119 struct efx_nic_type falcon_b_nic_type = {
3121 /* Map everything up to and including the RSS indirection
3122 * table. Don't map the MSI-X table or MSI-X PBA, since Linux
3123 * requires that they not be mapped. */
3124 .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
3125 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
3126 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
3127 .buf_tbl_base = BUF_TBL_KER_B0,
3128 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
3129 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
3130 .txd_ring_mask = FALCON_TXD_RING_MASK,
3131 .rxd_ring_mask = FALCON_RXD_RING_MASK,
3132 .evq_size = FALCON_EVQ_SIZE,
3133 .max_dma_mask = FALCON_DMA_MASK,
3134 .tx_dma_mask = FALCON_TX_DMA_MASK,
3136 .rx_xoff_thresh = 54272, /* ~(80 KB - 3 * max MTU) */
3137 .rx_xon_thresh = 27648, /* ~3 * max MTU */
3138 .rx_buffer_padding = 0,
3139 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3140 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
3141 * interrupt handler only supports 32