/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/mii.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "mac.h"
#include "spi.h"
#include "falcon.h"
#include "regs.h"
#include "io.h"
#include "mdio_10g.h"
#include "phy.h"
#include "workarounds.h"

/* Hardware control for SFC4000 (aka Falcon). */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

static int disable_dma_stats;

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1
#define TX_DC_BASE 0x130000

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
#define RX_DC_BASE 0x100000

static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));

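/* In these encodings the SIZE and BLOCK_SIZE fields hold log2 of the
 * byte counts quoted above: 13 -> 8 KB and 17 -> 128 KB devices,
 * 15 -> 32 KB erase block, 5 -> 32 B and 8 -> 256 B write blocks;
 * ADDR_LEN is the address length in bytes (2 or 3). */
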
/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this
 * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
static int rx_xoff_thresh_bytes = -1;
module_param(rx_xoff_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");

/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this
 * watermark, send XON. Only used if TX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
static int rx_xon_thresh_bytes = -1;
module_param(rx_xon_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");

/* If FALCON_MAX_INT_ERRORS internal errors occur within
 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define FALCON_INT_ERROR_EXPIRE 3600
#define FALCON_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define FALCON_FLUSH_INTERVAL 10
#define FALCON_FLUSH_POLL_COUNT 100

/**************************************************************************
 *
 * Falcon constants
 *
 **************************************************************************
 */

/* Size and alignment of special buffers (4KB) */
#define FALCON_BUF_SIZE 4096

/* Dummy SRAM size code */
#define SRM_NB_BSZ_ONCHIP_ONLY (-1)

#define FALCON_IS_DUAL_FUNC(efx)		\
	(falcon_rev(efx) < FALCON_REV_B0)

/**************************************************************************
 *
 * Falcon hardware access
 *
 **************************************************************************/

static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
					unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *falcon_event(struct efx_channel *channel,
					unsigned int index)
{
	return (((efx_qword_t *) (channel->eventq.addr)) + index);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int falcon_event_present(efx_qword_t *event)
{
	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}

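/* Illustration: if the NIC's 64-bit event write is torn into two 32-bit
 * halves, whichever half has not yet been written still reads as all
 * ones, so the event is not reported present until both halves land. */
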
/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
 */

static void falcon_setsda(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static void falcon_setscl(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}

static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}

static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};

/**************************************************************************
 *
 * Falcon special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a Falcon special buffer
 *
 * This will define a buffer (previously allocated via
 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
falcon_init_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
			index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		falcon_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer from Falcon and clears the buffer table entries */
static void
falcon_fini_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
		buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new Falcon special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into Falcon's buffer table.
 *
 * This call will allocate 4KB buffers, since Falcon can't use 8KB
 * buffers for event queues and descriptor rings.
 */
static int falcon_alloc_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer,
				       unsigned int len)
{
	len = ALIGN(len, FALCON_BUF_SIZE);

	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / FALCON_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

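/* Worked example: a 1024-entry descriptor ring of 8-byte descriptors
 * needs 8192 bytes, i.e. two 4KB special buffers, and therefore
 * consumes two consecutive buffer table IDs. */
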
static void falcon_free_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, buffer->len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
			    buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Falcon generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

static int falcon_alloc_buffer(struct efx_nic *efx,
			       struct efx_buffer *buffer, unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * Falcon TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * then notify the NIC that new descriptors are available.
 */
void falcon_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = falcon_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
	falcon_notify_tx_desc(tx_queue);
}

/* Allocate hardware resources for a TX queue */
int falcon_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
					   EFX_TXQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = false;

	/* Pin TX descriptor ring */
	falcon_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (falcon_rev(efx) >= FALCON_REV_B0) {
		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (falcon_rev(efx) < FALCON_REV_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
}

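/* Note on FRF_AZ_TX_DESCQ_SIZE above: txd.entries counts 4KB special
 * buffers of 512 eight-byte descriptors each, so __ffs(txd.entries)
 * increments each time the ring size doubles from 512 descriptors,
 * which matches the hardware's size encoding (e.g. __ffs(2) == 1 for
 * a 1024-descriptor ring). */
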
static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void falcon_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(!tx_queue->flushed);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	falcon_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void falcon_remove_tx(struct efx_tx_queue *tx_queue)
{
	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * Falcon RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
}

/* This creates an entry in the RX descriptor queue */
static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
					unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = falcon_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		falcon_build_rx_desc(rx_queue,
				     rx_queue->notified_count &
				     EFX_RXQ_MASK);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(rx_queue->efx, &reg,
			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
}

int falcon_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
					   EFX_RXQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
	bool iscsi_digest_en = is_b0;

	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
		rx_queue->queue, rx_queue->rxd.index,
		rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = false;

	/* Pin RX descriptor ring */
	falcon_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      rx_queue->channel->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);
}

static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void falcon_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(!rx_queue->flushed);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);

	/* Unpin RX descriptor ring */
	falcon_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void falcon_remove_rx(struct efx_rx_queue *rx_queue)
{
	falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Falcon event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * read" event.
 */
void falcon_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

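/* This mechanism is used below by falcon_generate_test_event() and
 * falcon_sim_phy_event() to inject driver-defined events as if the
 * hardware had raised them. */
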
/* Handle a transmit completion event
 *
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void falcon_handle_tx_event(struct efx_channel *channel,
				   efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		channel->irq_mod_score +=
			(tx_ev_desc_ptr - tx_queue->read_count) &
			EFX_TXQ_MASK;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT"\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				    const efx_qword_t *event,
				    bool *rx_ev_pkt_ok,
				    bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_IP_FRAG_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}
	if (rx_ev_ip_frag_err)
		++rx_queue->channel->n_rx_ip_frag_err;

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}

/* Handle receive events that are not in-order. */
static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
				       unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & EFX_RXQ_MASK;
	dropped = (index - expected) & EFX_RXQ_MASK;
	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
		 dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void falcon_handle_rx_event(struct efx_channel *channel,
				   const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = &efx->rx_queue[channel->channel];

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IPv4 or
		 * UDP/IPv4, then we can rely on the hardware checksum.
		 */
		checksummed =
			efx->rx_checksum_enabled &&
			(rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
			 rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP);
	} else {
		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
					&discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match))
			discard = true;
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}

/* Global events are basically PHY events */
static void falcon_handle_global_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
		efx->phy_op->clear_interrupt(efx);
		queue_work(efx->workqueue, &efx->phy_work);
		handled = true;
	}

	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		queue_work(efx->workqueue, &efx->mac_work);
		handled = true;
	}

	if (falcon_rev(efx) <= FALCON_REV_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		EFX_ERR(efx, "channel %d seen global RX_RESET "
			"event. Resetting.\n", channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		EFX_ERR(efx, "channel %d unknown global event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
}

static void falcon_handle_driver_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		break;
	}
}

int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rx_packets = 0;

	read_ptr = channel->eventq_read_ptr;

	do {
		p_event = falcon_event(channel, read_ptr);
		event = *p_event;

		if (!falcon_event_present(&event))
			/* End of events */
			break;

		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			falcon_handle_rx_event(channel, &event);
			++rx_packets;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			falcon_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			channel->eventq_magic = EFX_QWORD_FIELD(
				event, FSF_AZ_DRV_GEN_EV_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			falcon_handle_global_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			falcon_handle_driver_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;

	} while (rx_packets < rx_quota);

	channel->eventq_read_ptr = read_ptr;
	return rx_packets;
}

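/* Note: the caller (the NAPI poll routine in efx.c) treats the value
 * returned above as the amount of its RX budget consumed; TX and other
 * event types deliberately do not count towards rx_quota. */
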
void falcon_set_int_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}

/* Allocate buffer table entries for event queue */
int falcon_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
	return falcon_alloc_special_buffer(efx, &channel->eventq,
					   EFX_EVQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_eventq(struct efx_channel *channel)
{
	efx_oword_t evq_ptr;
	struct efx_nic *efx = channel->efx;

	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
		channel->channel, channel->eventq.index,
		channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	falcon_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(evq_ptr,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	falcon_set_int_moderation(channel);
}

void falcon_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t eventq_ptr;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(eventq_ptr);
	efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	falcon_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void falcon_remove_eventq(struct efx_channel *channel)
{
	falcon_free_special_buffer(channel->efx, &channel->eventq);
}

/* Generates a test event on the event queue. A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic.
 */
void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
{
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	falcon_generate_event(channel, &test_event);
}

void falcon_sim_phy_event(struct efx_nic *efx)
{
	efx_qword_t phy_event;

	EFX_POPULATE_QWORD_1(phy_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_GLOBAL_EV);
	if (EFX_IS10G(efx))
		EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_XG_PHY0_INTR, 1);
	else
		EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_G_PHY0_INTR, 1);

	falcon_generate_event(&efx->channel[0], &phy_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

static void falcon_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;

	do {
		efx_qword_t *event = falcon_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!falcon_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TX_QUEUE_COUNT) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = true;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_queues) {
				rx_queue = efx->rx_queue + ev_queue;

				/* retry the rx flush */
				if (ev_failed)
					falcon_flush_rx_queue(rx_queue);
				else
					rx_queue->flushed = true;
			}
		}

		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
	} while (read_ptr != end_ptr);
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int falcon_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i;
	bool outstanding;

	/* Issue flush requests */
	efx_for_each_tx_queue(tx_queue, efx) {
		tx_queue->flushed = false;
		falcon_flush_tx_queue(tx_queue);
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->flushed = false;
		falcon_flush_rx_queue(rx_queue);
	}

	/* Poll the evq looking for flush completions. Since we're not pushing
	 * any more rx or tx descriptors at this point, we're in no danger of
	 * overflowing the evq whilst we wait */
	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
		msleep(FALCON_FLUSH_INTERVAL);
		falcon_poll_flush_events(efx);

		/* Check if every queue has been successfully flushed */
		outstanding = false;
		efx_for_each_tx_queue(tx_queue, efx)
			outstanding |= !tx_queue->flushed;
		efx_for_each_rx_queue(rx_queue, efx)
			outstanding |= !rx_queue->flushed;
		if (!outstanding)
			return 0;
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway. "flushed" now
	 * indicates that we tried to flush. */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (!tx_queue->flushed)
			EFX_ERR(efx, "tx queue %d flush command timed out\n",
				tx_queue->queue);
		tx_queue->flushed = true;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (!rx_queue->flushed)
			EFX_ERR(efx, "rx queue %d flush command timed out\n",
				rx_queue->queue);
		rx_queue->flushed = true;
	}

	if (EFX_WORKAROUND_7803(efx))
		return 0;

	return -ETIMEDOUT;
}

/**************************************************************************
 *
 * Falcon hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate Falcon interrupts */
static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
				     int force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_2(int_en_reg_ker,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void falcon_enable_interrupts(struct efx_nic *efx)
{
	efx_oword_t int_adr_reg_ker;
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Program address */
	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &int_adr_reg_ker, FR_AZ_INT_ADR_KER);

	/* Enable interrupts */
	falcon_interrupts(efx, 1, 0);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void falcon_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	falcon_interrupts(efx, 0, 0);
}

/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void falcon_generate_interrupt(struct efx_nic *efx)
{
	falcon_interrupts(efx, 1, 1);
}

/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		EFX_OWORD_VAL(fatal_intr),
		error ? "disabling bus mastering" : "no recognised error");
	if (error == 0)
		goto out;

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (FALCON_IS_DUAL_FUNC(efx))
		pci_clear_master(nic_data->pci_dev2);
	falcon_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < FALCON_MAX_INT_ERRORS) {
		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen. "
			"NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}
out:
	return IRQ_HANDLED;
}

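/* With FALCON_MAX_INT_ERRORS == 5 and FALCON_INT_ERROR_EXPIRE == 3600,
 * the NIC is only disabled if five fatal interrupts arrive within an
 * hour; the count restarts once the expiry time has passed. */
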
/* Handle a legacy interrupt from Falcon
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of any interrupting queues */
	efx_for_each_channel(channel, efx) {
		if ((queues & 1) ||
		    falcon_event_present(
			    falcon_event(channel, channel->eventq_read_ptr))) {
			efx_schedule_channel(channel);
			result = IRQ_HANDLED;
		}
		queues >>= 1;
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}

static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	struct efx_channel *channel;
	int syserr;
	u32 queues;

	/* Check to see if this is our interrupt. If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
			  raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule processing of any interrupting queues */
	channel = &efx->channel[0];
	while (queues) {
		if (queues & 0x01)
			efx_schedule_channel(channel);
		channel++;
		queues >>= 1;
	}

	return IRQ_HANDLED;
}

/* Handle an MSI interrupt from Falcon
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ.
 */
static void falcon_setup_rss_indir_table(struct efx_nic *efx)
{
	int i = 0;
	unsigned long offset;
	efx_dword_t dword;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	for (offset = FR_BZ_RX_INDIRECTION_TBL;
	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
	     offset += 0x10) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     i % efx->n_rx_queues);
		efx_writed(efx, &dword, offset);
		i++;
	}
}

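/* The table spans 0x800 bytes at a stride of 0x10, i.e. 128 entries,
 * so RX queues are assigned round-robin across all 128 hash buckets. */
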
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int falcon_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (falcon_rev(efx) >= FALCON_REV_B0)
			handler = falcon_legacy_interrupt_b0;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
				efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, falcon_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 channel->name, channel);
		if (rc) {
			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
 fail1:
	return rc;
}

void falcon_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)

static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;
	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}

/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals. Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

int falcon_spi_cmd(const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	struct efx_nic *efx = spi->efx;
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;
	BUG_ON(!mutex_is_locked(&efx->spi_lock));

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}

static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}

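/* Example: with a 256-byte write block, a write starting at offset 250
 * is limited to 6 bytes so that it never crosses a block boundary (and
 * never exceeds FALCON_SPI_MAX_LEN). */
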
static u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}

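/* For EEPROMs with 9-bit addressing (munge_address == 1), address bit 8
 * is carried in bit 3 of the command byte; for all other devices the
 * expression above leaves the command unchanged. */
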
/* Wait up to 10 ms for buffered write completion */
int falcon_spi_wait_write(const struct efx_spi_device *spi)
{
	struct efx_nic *efx = spi->efx;
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "SPI write timeout on device %d"
				" last status=0x%02x\n",
				spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
		    size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
		     size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(spi);
		if (rc)
			break;

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */

static int falcon_reset_macs(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	if (falcon_rev(efx) < FALCON_REV_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		if (!EFX_IS10G(efx)) {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);

			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);
			return 0;
		} else {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
			efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

			for (count = 0; count < 10000; count++) {
				efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
				if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
				    0)
					return 0;
				udelay(10);
			}

			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
			return -ETIMEDOUT;
		}
	}

	/* MAC stats will fail whilst the TX fifo is draining. Serialise
	 * the drain sequence with the statistics fetch */
	efx_stats_disable(efx);

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
				count);
			break;
		}
		if (count > 20) {
			EFX_ERR(efx, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	efx_stats_enable(efx);

	/* If we've reset the EM block and the link is up, then
	 * we'll have to kick the XAUI link so the PHY can recover */
	if (efx->link_state.up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
		falcon_reset_xaui(efx);

	return 0;
}

void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

	if ((falcon_rev(efx) < FALCON_REV_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}

void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	/* Isolate the MAC -> RX */
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);

	if (!efx->link_state.up)
		falcon_drain_tx_fifo(efx);
}

void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	efx_oword_t reg;
	int link_speed;
	bool tx_fc;

	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised. Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_set_multicast_hash(efx);

	/* Transmission of pause frames when RX crosses the threshold is
	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
	 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
	tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc);

	/* Unisolate the MAC -> RX */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}

int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
{
	efx_oword_t reg;
	volatile u32 *dma_done;
	int i;

	if (disable_dma_stats)
		return 0;

	/* Statistics fetch will fail if the MAC is in TX drain */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		efx_oword_t temp;
		efx_reado(efx, &temp, FR_AB_MAC_CTRL);
		if (EFX_OWORD_FIELD(temp, FRF_BB_TXFIFO_DRAIN_EN))
			return 0;
	}

	dma_done = (efx->stats_buffer.addr + done_offset);
	*dma_done = FALCON_STATS_NOT_DONE;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	/* Wait for transfer to complete */
	for (i = 0; i < 400; i++) {
		if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
			rmb(); /* Ensure the stats are valid. */
			return 0;
		}
		udelay(10);
	}

	EFX_ERR(efx, "timed out waiting for statistics\n");
	return -ETIMEDOUT;
}

/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */

/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct efx_nic *efx)
{
	efx_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		efx_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				EFX_ERR(efx, "error from GMII access "
					EFX_OWORD_FMT"\n",
					EFX_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	EFX_ERR(efx, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}

2064 /* Write an MDIO register of a PHY connected to Falcon. */
2065 static int falcon_mdio_write(struct net_device *net_dev,
2066 int prtad, int devad, u16 addr, u16 value)
2068 struct efx_nic *efx = netdev_priv(net_dev);
2072 EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
2073 prtad, devad, addr, value);
2075 spin_lock_bh(&efx->phy_lock);
2077 /* Check MDIO not currently being accessed */
2078 rc = falcon_gmii_wait(efx);
2082 /* Write the address/ID register */
2083 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2084 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2086 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2087 FRF_AB_MD_DEV_ADR, devad);
2088 efx_writeo(efx, &reg, FR_AB_MD_ID);
2091 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
2092 efx_writeo(efx, &reg, FR_AB_MD_TXD);
2094 EFX_POPULATE_OWORD_2(reg,
2095 FRF_AB_MD_WRC, 1,
2096 FRF_AB_MD_GC, 0);
2097 efx_writeo(efx, &reg, FR_AB_MD_CS);
2099 /* Wait for data to be written */
2100 rc = falcon_gmii_wait(efx);
2102 /* Abort the write operation */
2103 EFX_POPULATE_OWORD_2(reg,
2104 FRF_AB_MD_WRC, 0,
2105 FRF_AB_MD_GC, 1);
2106 efx_writeo(efx, &reg, FR_AB_MD_CS);
2111 spin_unlock_bh(&efx->phy_lock);
2115 /* Read an MDIO register of a PHY connected to Falcon. */
2116 static int falcon_mdio_read(struct net_device *net_dev,
2117 int prtad, int devad, u16 addr)
2119 struct efx_nic *efx = netdev_priv(net_dev);
2123 spin_lock_bh(&efx->phy_lock);
2125 /* Check MDIO not currently being accessed */
2126 rc = falcon_gmii_wait(efx);
2130 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2131 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2133 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2134 FRF_AB_MD_DEV_ADR, devad);
2135 efx_writeo(efx, &reg, FR_AB_MD_ID);
2137 /* Request data to be read */
2138 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
2139 efx_writeo(efx, &reg, FR_AB_MD_CS);
2141 /* Wait for data to become available */
2142 rc = falcon_gmii_wait(efx);
2144 efx_reado(efx, &reg, FR_AB_MD_RXD);
2145 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
2146 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
2147 prtad, devad, addr, rc);
2149 /* Abort the read operation */
2150 EFX_POPULATE_OWORD_2(reg,
2151 FRF_AB_MD_RIC, 0,
2152 FRF_AB_MD_GC, 1);
2153 efx_writeo(efx, &reg, FR_AB_MD_CS);
2155 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
2156 prtad, devad, addr, rc);
2160 spin_unlock_bh(&efx->phy_lock);
2164 int falcon_switch_mac(struct efx_nic *efx)
2166 struct efx_mac_operations *old_mac_op = efx->mac_op;
2167 efx_oword_t nic_stat;
2171 /* Don't try to fetch MAC stats while we're switching MACs */
2172 efx_stats_disable(efx);
2174 /* Internal loopbacks override the PHY speed setting */
2175 if (efx->loopback_mode == LOOPBACK_GMAC) {
2176 efx->link_state.speed = 1000;
2177 efx->link_state.fd = true;
2178 } else if (LOOPBACK_INTERNAL(efx)) {
2179 efx->link_state.speed = 10000;
2180 efx->link_state.fd = true;
2183 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2184 efx->mac_op = (EFX_IS10G(efx) ?
2185 &falcon_xmac_operations : &falcon_gmac_operations);
2187 /* Always push the NIC_STAT_REG setting even if the MAC hasn't
2188 * changed, because this function is run after an online reset */
2189 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
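/* Strap value 5 selects the 10G (X)MAC and 3 the 1G (G)MAC; this
 * follows directly from the EFX_IS10G() test below. */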
2190 strap_val = EFX_IS10G(efx) ? 5 : 3;
2191 if (falcon_rev(efx) >= FALCON_REV_B0) {
2192 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
2193 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
2194 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
2195 } else {
2196 /* Falcon A1 does not support 1G/10G speed switching
2197 * and must not be used with a PHY that does. */
2198 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
2199 strap_val);
2200 }
2202 if (old_mac_op == efx->mac_op)
2205 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
2206 /* Not all MACs support a MAC-level link state */
2209 rc = falcon_reset_macs(efx);
2211 efx_stats_enable(efx);
2215 /* This call is responsible for hooking in the MAC and PHY operations */
2216 int falcon_probe_port(struct efx_nic *efx)
2220 switch (efx->phy_type) {
2221 case PHY_TYPE_SFX7101:
2222 efx->phy_op = &falcon_sfx7101_phy_ops;
2224 case PHY_TYPE_SFT9001A:
2225 case PHY_TYPE_SFT9001B:
2226 efx->phy_op = &falcon_sft9001_phy_ops;
2228 case PHY_TYPE_QT2022C2:
2229 case PHY_TYPE_QT2025C:
2230 efx->phy_op = &falcon_qt202x_phy_ops;
2233 EFX_ERR(efx, "Unknown PHY type %d\n",
2234 efx->phy_type);
2238 if (efx->phy_op->macs & EFX_XMAC)
2239 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2240 (1 << LOOPBACK_XGXS) |
2241 (1 << LOOPBACK_XAUI));
2242 if (efx->phy_op->macs & EFX_GMAC)
2243 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2244 efx->loopback_modes |= efx->phy_op->loopbacks;
2246 /* Set up MDIO structure for PHY */
2247 efx->mdio.mmds = efx->phy_op->mmds;
2248 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
2249 efx->mdio.mdio_read = falcon_mdio_read;
2250 efx->mdio.mdio_write = falcon_mdio_write;
2252 /* Hardware flow control. The Falcon A RX FIFO is too small for pause generation */
2253 if (falcon_rev(efx) >= FALCON_REV_B0)
2254 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
2256 efx->wanted_fc = EFX_FC_RX;
2258 /* Allocate buffer for stats */
2259 rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
2260 FALCON_MAC_STATS_SIZE);
2263 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
2264 (u64)efx->stats_buffer.dma_addr,
2265 efx->stats_buffer.addr,
2266 (u64)virt_to_phys(efx->stats_buffer.addr));
2271 void falcon_remove_port(struct efx_nic *efx)
2273 falcon_free_buffer(efx, &efx->stats_buffer);
2276 /**************************************************************************
2278 * Multicast filtering
2280 **************************************************************************
2283 void falcon_set_multicast_hash(struct efx_nic *efx)
2285 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2287 /* Broadcast packets go through the multicast hash filter.
2288 * ether_crc_le() of the broadcast address is 0xbe2612ff
2289 * so we always add bit 0xff to the mask.
2291 set_bit_le(0xff, mc_hash->byte);
2293 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
2294 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
2298 /**************************************************************************
2302 **************************************************************************/
2304 int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2306 struct falcon_nvconfig *nvconfig;
2307 struct efx_spi_device *spi;
2309 int rc, magic_num, struct_ver;
2310 __le16 *word, *limit;
2313 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
2317 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
2320 nvconfig = region + FALCON_NVCONFIG_OFFSET;
2322 mutex_lock(&efx->spi_lock);
2323 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
2324 mutex_unlock(&efx->spi_lock);
2326 EFX_ERR(efx, "Failed to read %s\n",
2327 efx->spi_flash ? "flash" : "EEPROM");
2332 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2333 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2336 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
2337 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2340 if (struct_ver < 2) {
2341 EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
2343 } else if (struct_ver < 4) {
2344 word = &nvconfig->board_magic_num;
2345 limit = (__le16 *) (nvconfig + 1);
2348 limit = region + FALCON_NVCONFIG_END;
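/* A valid image's 16-bit words sum (mod 0x10000) to 0xffff, which is
 * exactly what the ~csum test below verifies. */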
2350 for (csum = 0; word < limit; ++word)
2351 csum += le16_to_cpu(*word);
2353 if (~csum & 0xffff) {
2354 EFX_ERR(efx, "NVRAM has incorrect checksum\n");
2360 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
2367 /* Registers tested in the falcon register test */
2368 static struct {
2369 unsigned address;
2370 efx_oword_t mask;
2371 } efx_test_registers[] = {
2373 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2375 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
2377 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
2378 { FR_AZ_TX_RESERVED,
2379 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
2381 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
2382 { FR_AZ_SRM_TX_DC_CFG,
2383 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
2385 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
2386 { FR_AZ_RX_DC_PF_WM,
2387 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2389 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2391 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
2393 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
2395 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2397 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
2399 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
2400 { FR_AB_XM_RX_PARAM,
2401 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
2403 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
2405 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
2407 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
2408 };
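/* Returns true if *a and *b differ in any bit position covered by *mask */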
2410 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
2411 const efx_oword_t *mask)
2413 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
2414 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
2417 int falcon_test_registers(struct efx_nic *efx)
2419 unsigned address = 0, i, j;
2420 efx_oword_t mask, imask, original, reg, buf;
2422 /* Falcon should be in loopback to isolate the XMAC from the PHY */
2423 WARN_ON(!LOOPBACK_INTERNAL(efx));
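/* Strategy: for each register in the table, sweep every bit the mask
 * marks as testable, checking that the bit can be both set and cleared
 * in isolation; the original register value is restored afterwards. */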
2425 for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
2426 address = efx_test_registers[i].address;
2427 mask = imask = efx_test_registers[i].mask;
2428 EFX_INVERT_OWORD(imask);
2430 efx_reado(efx, &original, address);
2432 /* bit sweep on and off */
2433 for (j = 0; j < 128; j++) {
2434 if (!EFX_EXTRACT_OWORD32(mask, j, j))
2437 /* Test this testable bit can be set in isolation */
2438 EFX_AND_OWORD(reg, original, mask);
2439 EFX_SET_OWORD32(reg, j, j, 1);
2441 efx_writeo(efx, &reg, address);
2442 efx_reado(efx, &buf, address);
2444 if (efx_masked_compare_oword(&reg, &buf, &mask))
2447 /* Test this testable bit can be cleared in isolation */
2448 EFX_OR_OWORD(reg, original, mask);
2449 EFX_SET_OWORD32(reg, j, j, 0);
2451 efx_writeo(efx, &reg, address);
2452 efx_reado(efx, &buf, address);
2454 if (efx_masked_compare_oword(&reg, &buf, &mask))
2458 efx_writeo(efx, &original, address);
2464 EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
2465 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
2466 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
2470 /**************************************************************************
2474 **************************************************************************
2477 /* Resets NIC to known state. This routine must be called in process
2478 * context and is allowed to sleep. */
2479 int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2481 struct falcon_nic_data *nic_data = efx->nic_data;
2482 efx_oword_t glb_ctl_reg_ker;
2485 EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
2487 /* Initiate device reset */
2488 if (method == RESET_TYPE_WORLD) {
2489 rc = pci_save_state(efx->pci_dev);
2491 EFX_ERR(efx, "failed to backup PCI state of primary "
2492 "function prior to hardware reset\n");
2495 if (FALCON_IS_DUAL_FUNC(efx)) {
2496 rc = pci_save_state(nic_data->pci_dev2);
2498 EFX_ERR(efx, "failed to backup PCI state of "
2499 "secondary function prior to "
2500 "hardware reset\n");
2505 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2506 FRF_AB_EXT_PHY_RST_DUR,
2507 FFE_AB_EXT_PHY_RST_DUR_10240US,
2508 FRF_AB_SWRST, 1);
2509 } else {
2510 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2511 /* exclude PHY from "invisible" reset */
2512 FRF_AB_EXT_PHY_RST_CTL,
2513 method == RESET_TYPE_INVISIBLE,
2514 /* exclude EEPROM/flash and PCIe */
2515 FRF_AB_PCIE_CORE_RST_CTL, 1,
2516 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
2517 FRF_AB_PCIE_SD_RST_CTL, 1,
2518 FRF_AB_EE_RST_CTL, 1,
2519 FRF_AB_EXT_PHY_RST_DUR,
2520 FFE_AB_EXT_PHY_RST_DUR_10240US,
2521 FRF_AB_SWRST, 1);
2522 }
2523 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2525 EFX_LOG(efx, "waiting for hardware reset\n");
2526 schedule_timeout_uninterruptible(HZ / 20);
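/* HZ / 20 = 50 ms; the reset should complete well within this sleep,
 * and the FRF_AB_SWRST check below catches the case where it did not. */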
2528 /* Restore PCI configuration if needed */
2529 if (method == RESET_TYPE_WORLD) {
2530 if (FALCON_IS_DUAL_FUNC(efx)) {
2531 rc = pci_restore_state(nic_data->pci_dev2);
2533 EFX_ERR(efx, "failed to restore PCI config for "
2534 "the secondary function\n");
2538 rc = pci_restore_state(efx->pci_dev);
2540 EFX_ERR(efx, "failed to restore PCI config for the "
2541 "primary function\n");
2544 EFX_LOG(efx, "successfully restored PCI config\n");
2547 /* Assert that reset is complete */
2548 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2549 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
2551 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2554 EFX_LOG(efx, "hardware reset complete\n");
2558 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
2561 pci_restore_state(efx->pci_dev);
2568 /* Zeroes out the SRAM contents. This routine must be called in
2569 * process context and is allowed to sleep.
2571 static int falcon_reset_sram(struct efx_nic *efx)
2573 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2576 /* Set the SRAM wake/sleep GPIO appropriately. */
2577 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2578 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2579 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
2580 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2582 /* Initiate SRAM reset */
2583 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2584 FRF_AZ_SRM_INIT_EN, 1,
2585 FRF_AZ_SRM_NB_SZ, 0);
2586 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2588 /* Wait for SRAM reset to complete */
2591 EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
2593 /* SRAM reset is slow; expect around 16ms */
2594 schedule_timeout_uninterruptible(HZ / 50);
2596 /* Check for reset complete */
2597 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2598 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2599 EFX_LOG(efx, "SRAM reset complete\n");
2603 } while (++count < 20); /* wait up to 0.4 s */
2605 EFX_ERR(efx, "timed out waiting for SRAM reset\n");
2609 static int falcon_spi_device_init(struct efx_nic *efx,
2610 struct efx_spi_device **spi_device_ret,
2611 unsigned int device_id, u32 device_type)
2613 struct efx_spi_device *spi_device;
2615 if (device_type != 0) {
2616 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
2619 spi_device->device_id = device_id;
2620 spi_device->size =
2621 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2622 spi_device->addr_len =
2623 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
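/* A 512-byte device with a 1-byte address needs its ninth address bit
 * carried in the command byte; flagging it here lets the SPI access
 * code munge commands accordingly. */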
2624 spi_device->munge_address = (spi_device->size == 1 << 9 &&
2625 spi_device->addr_len == 1);
2626 spi_device->erase_command =
2627 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
2628 spi_device->erase_size =
2629 1 << SPI_DEV_TYPE_FIELD(device_type,
2630 SPI_DEV_TYPE_ERASE_SIZE);
2631 spi_device->block_size =
2632 1 << SPI_DEV_TYPE_FIELD(device_type,
2633 SPI_DEV_TYPE_BLOCK_SIZE);
2635 spi_device->efx = efx;
2640 kfree(*spi_device_ret);
2641 *spi_device_ret = spi_device;
2646 static void falcon_remove_spi_devices(struct efx_nic *efx)
2648 kfree(efx->spi_eeprom);
2649 efx->spi_eeprom = NULL;
2650 kfree(efx->spi_flash);
2651 efx->spi_flash = NULL;
2654 /* Extract non-volatile configuration */
2655 static int falcon_probe_nvconfig(struct efx_nic *efx)
2657 struct falcon_nvconfig *nvconfig;
2661 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
2665 rc = falcon_read_nvram(efx, nvconfig);
2666 if (rc == -EINVAL) {
2667 EFX_ERR(efx, "NVRAM is invalid, therefore using defaults\n");
2668 efx->phy_type = PHY_TYPE_NONE;
2669 efx->mdio.prtad = MDIO_PRTAD_NONE;
2675 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
2676 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
2678 efx->phy_type = v2->port0_phy_type;
2679 efx->mdio.prtad = v2->port0_phy_addr;
2680 board_rev = le16_to_cpu(v2->board_revision);
2682 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2683 rc = falcon_spi_device_init(
2684 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
2685 le32_to_cpu(v3->spi_device_type
2686 [FFE_AB_SPI_DEVICE_FLASH]));
2689 rc = falcon_spi_device_init(
2690 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
2691 le32_to_cpu(v3->spi_device_type
2692 [FFE_AB_SPI_DEVICE_EEPROM]));
2698 /* Read the MAC addresses */
2699 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2701 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
2703 falcon_probe_board(efx, board_rev);
2709 falcon_remove_spi_devices(efx);
2715 /* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
2716 * count, port speed). Set workaround and feature flags accordingly.
2718 static int falcon_probe_nic_variant(struct efx_nic *efx)
2720 efx_oword_t altera_build;
2721 efx_oword_t nic_stat;
2723 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
2724 if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
2725 EFX_ERR(efx, "Falcon FPGA not supported\n");
2729 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2731 switch (falcon_rev(efx)) {
2734 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2738 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
2739 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2748 EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
2752 /* Initial assumed speed */
2753 efx->link_state.speed = EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) ? 10000 : 1000;
2758 /* Probe all SPI devices on the NIC */
2759 static void falcon_probe_spi_devices(struct efx_nic *efx)
2761 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2764 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
2765 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2766 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2768 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
2769 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
2770 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
2771 EFX_LOG(efx, "Booted from %s\n",
2772 boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
2774 /* Disable VPD and set clock dividers to safe
2775 * values for initial programming. */
2777 EFX_LOG(efx, "Booted from internal ASIC settings;"
2778 " setting SPI config\n");
2779 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2780 /* 125 MHz / 7 ~= 18 MHz */
2781 FRF_AB_EE_SF_CLOCK_DIV, 7,
2782 /* 125 MHz / 63 ~= 2 MHz */
2783 FRF_AB_EE_EE_CLOCK_DIV, 63);
2784 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2787 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
2788 falcon_spi_device_init(efx, &efx->spi_flash,
2789 FFE_AB_SPI_DEVICE_FLASH,
2790 default_flash_type);
2791 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
2792 falcon_spi_device_init(efx, &efx->spi_eeprom,
2793 FFE_AB_SPI_DEVICE_EEPROM,
2794 large_eeprom_type);
2797 int falcon_probe_nic(struct efx_nic *efx)
2799 struct falcon_nic_data *nic_data;
2800 struct falcon_board *board;
2803 /* Allocate storage for hardware specific data */
2804 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2807 efx->nic_data = nic_data;
2809 /* Determine number of ports etc. */
2810 rc = falcon_probe_nic_variant(efx);
2814 /* Probe secondary function if expected */
2815 if (FALCON_IS_DUAL_FUNC(efx)) {
2816 struct pci_dev *dev = pci_dev_get(efx->pci_dev);
2818 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
2819 dev))) {
2820 if (dev->bus == efx->pci_dev->bus &&
2821 dev->devfn == efx->pci_dev->devfn + 1) {
2822 nic_data->pci_dev2 = dev;
2826 if (!nic_data->pci_dev2) {
2827 EFX_ERR(efx, "failed to find secondary function\n");
2833 /* Now we can reset the NIC */
2834 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
2836 EFX_ERR(efx, "failed to reset NIC\n");
2840 /* Allocate memory for INT_KER */
2841 rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
2844 BUG_ON(efx->irq_status.dma_addr & 0x0f);
2846 EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
2847 (u64)efx->irq_status.dma_addr,
2848 efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
2850 falcon_probe_spi_devices(efx);
2852 /* Read in the non-volatile configuration */
2853 rc = falcon_probe_nvconfig(efx);
2857 /* Initialise I2C adapter */
2858 board = falcon_board(efx);
2859 board->i2c_adap.owner = THIS_MODULE;
2860 board->i2c_data = falcon_i2c_bit_operations;
2861 board->i2c_data.data = efx;
2862 board->i2c_adap.algo_data = &board->i2c_data;
2863 board->i2c_adap.dev.parent = &efx->pci_dev->dev;
2864 strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
2865 sizeof(board->i2c_adap.name));
2866 rc = i2c_bit_add_bus(&board->i2c_adap);
2870 rc = falcon_board(efx)->init(efx);
2872 EFX_ERR(efx, "failed to initialise board\n");
2879 BUG_ON(i2c_del_adapter(&board->i2c_adap));
2880 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
2882 falcon_remove_spi_devices(efx);
2883 falcon_free_buffer(efx, &efx->irq_status);
2886 if (nic_data->pci_dev2) {
2887 pci_dev_put(nic_data->pci_dev2);
2888 nic_data->pci_dev2 = NULL;
2892 kfree(efx->nic_data);
2896 static void falcon_init_rx_cfg(struct efx_nic *efx)
2898 /* Prior to Siena the RX DMA engine will split each frame at
2899 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
2900 * be so large that splitting never happens. */
2901 const unsigned huge_buf_size = (3 * 4096) >> 5;
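/* (3 * 4096) >> 5 = 384 32-byte units, i.e. 12 KB - comfortably larger
 * than any frame the NIC will receive. */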
2902 /* RX control FIFO thresholds (32 entries) */
2903 const unsigned ctrl_xon_thr = 20;
2904 const unsigned ctrl_xoff_thr = 25;
2905 /* RX data FIFO thresholds (256-byte units; size varies) */
2906 int data_xon_thr = rx_xon_thresh_bytes >> 8;
2907 int data_xoff_thr = rx_xoff_thresh_bytes >> 8;
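/* Negative values (the module parameter defaults) mean "use the
 * per-revision defaults chosen below". */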
2910 efx_reado(efx, &reg, FR_AZ_RX_CFG);
2911 if (falcon_rev(efx) <= FALCON_REV_A1) {
2912 /* Data FIFO size is 5.5K */
2913 if (data_xon_thr < 0)
2914 data_xon_thr = 512 >> 8;
2915 if (data_xoff_thr < 0)
2916 data_xoff_thr = 2048 >> 8;
2917 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2918 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
2919 huge_buf_size);
2920 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
2921 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
2922 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2923 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
2925 /* Data FIFO size is 80K; register fields moved */
2926 if (data_xon_thr < 0)
2927 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
2928 if (data_xoff_thr < 0)
2929 data_xoff_thr = 54272 >> 8; /* ~80 KB - 3*max MTU */
2930 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
2931 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
2932 huge_buf_size);
2933 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
2934 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
2935 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
2936 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
2937 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2939 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
2942 /* This call performs hardware-specific global initialisation, such as
2943 * defining the descriptor cache sizes and number of RSS channels.
2944 * It does not set up any buffers, descriptor rings or event queues.
2946 int falcon_init_nic(struct efx_nic *efx)
2951 /* Use on-chip SRAM */
2952 efx_reado(efx, &temp, FR_AB_NIC_STAT);
2953 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
2954 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
2956 /* Set the source of the GMAC clock */
2957 if (falcon_rev(efx) == FALCON_REV_B0) {
2958 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
2959 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
2960 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
2963 rc = falcon_reset_sram(efx);
2967 /* Set positions of descriptor caches in SRAM. */
2968 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
2969 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
2970 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
2971 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
2973 /* Set TX descriptor cache size. */
2974 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
2975 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
2976 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
2978 /* Set RX descriptor cache size. Set low watermark to size-8, as
2979 * this allows most efficient prefetching.
2981 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
2982 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
2983 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
2984 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
2985 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
2987 /* Clear the parity enables on the TX data fifos as
2988 * they produce false parity errors because of timing issues
2990 if (EFX_WORKAROUND_5129(efx)) {
2991 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
2992 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
2993 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
2996 /* Enable all the genuinely fatal interrupts. (They are still
2997 * masked by the overall interrupt mask, controlled by
2998 * falcon_interrupts()).
3000 * Note: All other fatal interrupts are enabled
3002 EFX_POPULATE_OWORD_3(temp,
3003 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
3004 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
3005 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
3006 EFX_INVERT_OWORD(temp);
3007 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
3009 if (EFX_WORKAROUND_7244(efx)) {
3010 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
3011 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
3012 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
3013 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
3014 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
3015 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
3018 falcon_setup_rss_indir_table(efx);
3020 /* XXX This is documented only for Falcon A0/A1 */
3021 /* Set up RX. The "wait for descriptor" feature is broken and must
3022 * be disabled. RXDP recovery shouldn't be needed, but is.
3024 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
3025 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
3026 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
3027 if (EFX_WORKAROUND_5583(efx))
3028 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
3029 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
3031 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3032 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3034 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
3035 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
3036 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
3037 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
3038 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
3039 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
3040 /* Enable SW_EV so the char driver can inherit it - assumed harmless here */
3041 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
3042 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
3043 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
3044 /* Squash TX of packets of 16 bytes or less */
3045 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
3046 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
3047 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
3049 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
3050 * descriptors (which is bad).
3052 efx_reado(efx, &temp, FR_AZ_TX_CFG);
3053 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
3054 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
3056 falcon_init_rx_cfg(efx);
3058 /* Set destination of both TX and RX Flush events */
3059 if (falcon_rev(efx) >= FALCON_REV_B0) {
3060 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
3061 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
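/* This routes all TX and RX flush completion events to event queue 0,
 * which the flush logic polls (see FALCON_FLUSH_POLL_COUNT). */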
3067 void falcon_remove_nic(struct efx_nic *efx)
3069 struct falcon_nic_data *nic_data = efx->nic_data;
3070 struct falcon_board *board = falcon_board(efx);
3073 falcon_board(efx)->fini(efx);
3075 /* Remove I2C adapter and clear it in preparation for a retry */
3076 rc = i2c_del_adapter(&board->i2c_adap);
3077 BUG_ON(rc);
3078 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
3080 falcon_remove_spi_devices(efx);
3081 falcon_free_buffer(efx, &efx->irq_status);
3083 falcon_reset_hw(efx, RESET_TYPE_ALL);
3085 /* Release the second function after the reset */
3086 if (nic_data->pci_dev2) {
3087 pci_dev_put(nic_data->pci_dev2);
3088 nic_data->pci_dev2 = NULL;
3091 /* Tear down the private nic state */
3092 kfree(efx->nic_data);
3093 efx->nic_data = NULL;
3096 void falcon_update_nic_stats(struct efx_nic *efx)
3100 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
3101 efx->n_rx_nodesc_drop_cnt +=
3102 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
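/* Accumulating with "+=" assumes the hardware drop counter is
 * clear-on-read; a free-running counter would be double-counted here. */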
3105 /**************************************************************************
3107 * Revision-dependent attributes used by efx.c
3109 **************************************************************************
3112 struct efx_nic_type falcon_a_nic_type = {
3113 .mem_map_size = 0x20000,
3114 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
3115 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
3116 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
3117 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
3118 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
3119 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3120 .rx_buffer_padding = 0x24,
3121 .max_interrupt_mode = EFX_INT_MODE_MSI,
3122 .phys_addr_channels = 4,
3125 struct efx_nic_type falcon_b_nic_type = {
3126 /* Map everything up to and including the RSS indirection
3127 * table. Don't map MSI-X table, MSI-X PBA since Linux
3128 * requires that they not be mapped. */
3129 .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
3130 FR_BZ_RX_INDIRECTION_TBL_STEP *
3131 FR_BZ_RX_INDIRECTION_TBL_ROWS),
3132 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
3133 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
3134 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
3135 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
3136 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
3137 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3138 .rx_buffer_padding = 0,
3139 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3140 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
3141 * interrupt handler only supports 32
3142 * channels */
3143 };