/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/mii.h>
#include "net_driver.h"
#include "workarounds.h"
/* Falcon hardware control.
 * Falcon is the internal codename for the SFC4000 controller that is
 * present in SFE400X evaluation boards
 */
/**
 * struct falcon_nic_data - Falcon NIC state
 * @next_buffer_table: First available buffer table id
 * @pci_dev2: The secondary PCI device if present
 * @i2c_data: Operations and state for I2C bit-bashing algorithm
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
 */
struct falcon_nic_data {
	unsigned next_buffer_table;
	struct pci_dev *pci_dev2;
	struct i2c_algo_bit_data i2c_data;

	unsigned int_error_count;
	unsigned long int_error_expire;
};
/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

static int disable_dma_stats;
/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 0
#define TX_DC_BASE 0x130000

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 2
#define RX_DC_BASE 0x100000
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
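
/* Illustrative decoding of the encodings above (editor's note, not from
 * the original source): the size and block-size fields hold log2 values,
 * so large_eeprom_type describes a 1 << 13 = 8192 byte device with a
 * 2-byte address and 1 << 5 = 32 byte write blocks, while
 * default_flash_type describes a 1 << 17 = 128 KB device with a 3-byte
 * address, erase opcode 0x52, 1 << 15 = 32 KB erase blocks and
 * 1 << 8 = 256 byte write blocks, matching the parts named in the
 * comments. */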
/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this
 * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
 * This also has an effect on RX/TX arbitration
 */
static int rx_xoff_thresh_bytes = -1;
module_param(rx_xoff_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this
 * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
 * This also has an effect on RX/TX arbitration
 */
static int rx_xon_thresh_bytes = -1;
module_param(rx_xon_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* If FALCON_MAX_INT_ERRORS internal errors occur within
 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define FALCON_INT_ERROR_EXPIRE 3600
#define FALCON_MAX_INT_ERRORS 5
/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define FALCON_FLUSH_INTERVAL 10
#define FALCON_FLUSH_POLL_COUNT 100
/**************************************************************************
 *
 * Falcon constants
 *
 **************************************************************************
 */

/* Size and alignment of special buffers (4KB) */
#define FALCON_BUF_SIZE 4096

/* Dummy SRAM size code */
#define SRM_NB_BSZ_ONCHIP_ONLY (-1)

#define FALCON_IS_DUAL_FUNC(efx)		\
	(falcon_rev(efx) < FALCON_REV_B0)
/**************************************************************************
 *
 * Falcon hardware access
 *
 **************************************************************************/
static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
					unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}
/* Read the current event from the event queue */
static inline efx_qword_t *falcon_event(struct efx_channel *channel,
					unsigned int index)
{
	return (((efx_qword_t *) (channel->eventq.addr)) + index);
}
/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int falcon_event_present(efx_qword_t *event)
{
	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}
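
/* Usage sketch (editor's illustration, not from the original source):
 * a cleared slot reads back as all ones, so a poll loop only consumes
 * real events:
 *
 *	efx_qword_t *p_event = falcon_event(channel, read_ptr);
 *	if (falcon_event_present(p_event)) {
 *		... handle the event ...
 *		EFX_SET_QWORD(*p_event);	// mark slot empty again
 *	}
 */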
/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************/
static void falcon_setsda(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static void falcon_setscl(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}

static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}
static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
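
/* Minimal sketch (an editor's assumption, not code from this file) of how
 * such a description is typically hooked up to the kernel's i2c-algo-bit
 * layer in the NIC probe path:
 *
 *	nic_data->i2c_data = falcon_i2c_bit_operations;
 *	nic_data->i2c_data.data = efx;
 *	efx->i2c_adap.algo_data = &nic_data->i2c_data;
 *	rc = i2c_bit_add_bus(&efx->i2c_adap);
 *
 * i2c_bit_add_bus() is the standard registration entry point for adapters
 * driven through struct i2c_algo_bit_data.
 */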
/**************************************************************************
 *
 * Falcon special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/
/*
 * Initialise a Falcon special buffer
 *
 * This will define a buffer (previously allocated via
 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
falcon_init_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
			index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		falcon_write_buf_tbl(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer from Falcon and clears the buffer table entries */
static void
falcon_fini_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
		buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
/*
 * Allocate a new Falcon special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into Falcon's buffer table.
 *
 * This call will allocate 4KB buffers, since Falcon can't use 8KB
 * buffers for event queues and descriptor rings.
 */
static int falcon_alloc_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer,
				       unsigned int len)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	len = ALIGN(len, FALCON_BUF_SIZE);

	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / FALCON_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = nic_data->next_buffer_table;
	nic_data->next_buffer_table += buffer->entries;

	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
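
/* Worked example (editor's illustration): an 8 KB request is already
 * aligned to FALCON_BUF_SIZE and yields entries == 2; if
 * next_buffer_table was 10, the buffer occupies table IDs 10-11 and the
 * counter advances to 12 for the next caller. */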
static void falcon_free_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, buffer->len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
			    buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}
/**************************************************************************
 *
 * Falcon generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/
static int falcon_alloc_buffer(struct efx_nic *efx,
			       struct efx_buffer *buffer, unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}
/**************************************************************************
 *
 * TX data path
 *
 **************************************************************************/
/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
}
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * then notify the hardware of the new descriptors.
 */
void falcon_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = falcon_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
	falcon_notify_tx_desc(tx_queue);
}
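
/* Worked example (editor's illustration): the counters are free-running,
 * so with EFX_TXQ_MASK == 1023 a write_count of 1025 maps to hardware
 * slot 1025 & 1023 == 1; the do/while loop above therefore fills every
 * slot from write_count up to (but excluding) insert_count, modulo the
 * ring size, before the single doorbell write in
 * falcon_notify_tx_desc(). */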
/* Allocate hardware resources for a TX queue */
int falcon_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
					   EFX_TXQ_SIZE * sizeof(efx_qword_t));
}
void falcon_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = false;

	/* Pin TX descriptor ring */
	falcon_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (falcon_rev(efx) >= FALCON_REV_B0) {
		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (falcon_rev(efx) < FALCON_REV_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
}
static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}
void falcon_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(!tx_queue->flushed);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	falcon_fini_special_buffer(efx, &tx_queue->txd);
}
/* Free buffers backing TX queue */
void falcon_remove_tx(struct efx_tx_queue *tx_queue)
{
	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 * RX data path
 *
 **************************************************************************/
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
}
/* This creates an entry in the RX descriptor queue */
static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
					unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = falcon_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		falcon_build_rx_desc(rx_queue,
				     rx_queue->notified_count &
				     EFX_RXQ_MASK);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(rx_queue->efx, &reg,
			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
}
int falcon_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;

	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
					   EFX_RXQ_SIZE * sizeof(efx_qword_t));
}
void falcon_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
	bool iscsi_digest_en = is_b0;

	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
		rx_queue->queue, rx_queue->rxd.index,
		rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = false;

	/* Pin RX descriptor ring */
	falcon_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      rx_queue->channel->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);
}
static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
void falcon_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(!rx_queue->flushed);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);

	/* Unpin RX descriptor ring */
	falcon_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void falcon_remove_rx(struct efx_rx_queue *rx_queue)
{
	falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Falcon event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * read" event.
 */
void falcon_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}
/* Use HW to insert a SW defined event */
void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
/* Handle a transmit completion event
 *
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void falcon_handle_tx_event(struct efx_channel *channel,
				   efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		channel->irq_mod_score +=
			(tx_ev_desc_ptr - tx_queue->read_count) &
			EFX_TXQ_MASK;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT"\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				    const efx_qword_t *event,
				    bool *rx_ev_pkt_ok,
				    bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_IP_FRAG_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}
	if (rx_ev_ip_frag_err)
		++rx_queue->channel->n_rx_ip_frag_err;

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}
/* Handle receive events that are not in-order. */
static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
				       unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & EFX_RXQ_MASK;
	dropped = (index - expected) & EFX_RXQ_MASK;
	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
		 dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
/* Handle a packet received event
 *
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void falcon_handle_rx_event(struct efx_channel *channel,
				   const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = &efx->rx_queue[channel->channel];

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IPv4 or
		 * UDP/IPv4, then we can rely on the hardware checksum.
		 */
		checksummed =
			rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
			rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP;
	} else {
		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
					&discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match))
			discard = true;
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
/* Global events are basically PHY events */
static void falcon_handle_global_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
		efx->phy_op->clear_interrupt(efx);
		queue_work(efx->workqueue, &efx->phy_work);
		handled = true;
	}

	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		queue_work(efx->workqueue, &efx->mac_work);
		handled = true;
	}

	if (falcon_rev(efx) <= FALCON_REV_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		EFX_ERR(efx, "channel %d seen global RX_RESET "
			"event. Resetting.\n", channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		EFX_ERR(efx, "channel %d unknown global event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
}
static void falcon_handle_driver_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		break;
	}
}
int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rx_packets = 0;

	read_ptr = channel->eventq_read_ptr;

	do {
		p_event = falcon_event(channel, read_ptr);
		event = *p_event;

		if (!falcon_event_present(&event))
			/* End of events */
			break;

		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			falcon_handle_rx_event(channel, &event);
			++rx_packets;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			falcon_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			channel->eventq_magic = EFX_QWORD_FIELD(
				event, FSF_AZ_DRV_GEN_EV_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			falcon_handle_global_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			falcon_handle_driver_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;

	} while (rx_packets < rx_quota);

	channel->eventq_read_ptr = read_ptr;
	return rx_packets;
}
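
/* Usage sketch (editor's assumption; the real caller lives in the generic
 * driver core, not this file): a NAPI-style poll routine would call this
 * with its remaining budget and re-arm the queue once it goes idle:
 *
 *	rx_packets = falcon_process_eventq(channel, budget);
 *	if (rx_packets < budget)
 *		falcon_eventq_read_ack(channel);
 */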
void falcon_set_int_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		/* Round to resolution supported by hardware. The value we
		 * program is based at 0. So actual interrupt moderation
		 * achieved is ((x + 1) * res).
		 */
		channel->irq_moderation -= (channel->irq_moderation %
					    FALCON_IRQ_MOD_RESOLUTION);
		if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION)
			channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION;
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     channel->irq_moderation /
				     FALCON_IRQ_MOD_RESOLUTION - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}
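
/* Worked example (editor's illustration): if FALCON_IRQ_MOD_RESOLUTION
 * were 5 and irq_moderation were 23, the rounding above clamps it to 20
 * and programs TC_TIMER_VAL = 20 / 5 - 1 = 3, which the hardware applies
 * as (3 + 1) * 5 = 20 units of interrupt hold-off. */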
/* Allocate buffer table entries for event queue */
int falcon_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
	return falcon_alloc_special_buffer(efx, &channel->eventq,
					   EFX_EVQ_SIZE * sizeof(efx_qword_t));
}
void falcon_init_eventq(struct efx_channel *channel)
{
	efx_oword_t evq_ptr;
	struct efx_nic *efx = channel->efx;

	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
		channel->channel, channel->eventq.index,
		channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	falcon_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(evq_ptr,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	falcon_set_int_moderation(channel);
}
void falcon_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t eventq_ptr;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(eventq_ptr);
	efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	falcon_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void falcon_remove_eventq(struct efx_channel *channel)
{
	falcon_free_special_buffer(channel->efx, &channel->eventq);
}
/* Generates a test event on the event queue. A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic.
 */
void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
{
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	falcon_generate_event(channel, &test_event);
}
void falcon_sim_phy_event(struct efx_nic *efx)
{
	efx_qword_t phy_event;

	EFX_POPULATE_QWORD_1(phy_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_GLOBAL_EV);
	if (EFX_IS10G(efx))
		EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_XG_PHY0_INTR, 1);
	else
		EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_G_PHY0_INTR, 1);

	falcon_generate_event(&efx->channel[0], &phy_event);
}
/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
static void falcon_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;

	do {
		efx_qword_t *event = falcon_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!falcon_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TX_QUEUE_COUNT) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = true;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_queues) {
				rx_queue = efx->rx_queue + ev_queue;
				if (ev_failed)
					/* retry the rx flush */
					falcon_flush_rx_queue(rx_queue);
				else
					rx_queue->flushed = true;
			}
		}

		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
	} while (read_ptr != end_ptr);
}
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int falcon_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i;
	bool outstanding;

	/* Issue flush requests */
	efx_for_each_tx_queue(tx_queue, efx) {
		tx_queue->flushed = false;
		falcon_flush_tx_queue(tx_queue);
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->flushed = false;
		falcon_flush_rx_queue(rx_queue);
	}

	/* Poll the evq looking for flush completions. Since we're not pushing
	 * any more rx or tx descriptors at this point, we're in no danger of
	 * overflowing the evq whilst we wait */
	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
		msleep(FALCON_FLUSH_INTERVAL);
		falcon_poll_flush_events(efx);

		/* Check if every queue has been successfully flushed */
		outstanding = false;
		efx_for_each_tx_queue(tx_queue, efx)
			outstanding |= !tx_queue->flushed;
		efx_for_each_rx_queue(rx_queue, efx)
			outstanding |= !rx_queue->flushed;
		if (!outstanding)
			return 0;
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway. "flushed" now
	 * indicates that we tried to flush. */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (!tx_queue->flushed)
			EFX_ERR(efx, "tx queue %d flush command timed out\n",
				tx_queue->queue);
		tx_queue->flushed = true;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (!rx_queue->flushed)
			EFX_ERR(efx, "rx queue %d flush command timed out\n",
				rx_queue->queue);
		rx_queue->flushed = true;
	}

	if (EFX_WORKAROUND_7803(efx))
		return 0;

	return -ETIMEDOUT;
}
/**************************************************************************
 *
 * Falcon hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/
/* Enable/disable/generate Falcon interrupts */
static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
				     int force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_2(int_en_reg_ker,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}
void falcon_enable_interrupts(struct efx_nic *efx)
{
	efx_oword_t int_adr_reg_ker;
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Program address */
	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &int_adr_reg_ker, FR_AZ_INT_ADR_KER);

	/* Enable interrupts */
	falcon_interrupts(efx, 1, 0);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}
void falcon_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	falcon_interrupts(efx, 0, 0);
}

/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void falcon_generate_interrupt(struct efx_nic *efx)
{
	falcon_interrupts(efx, 1, 1);
}
/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		EFX_OWORD_VAL(fatal_intr),
		error ? "disabling bus mastering" : "no recognised error");
	if (error == 0)
		goto out;

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (FALCON_IS_DUAL_FUNC(efx))
		pci_clear_master(nic_data->pci_dev2);
	falcon_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (nic_data->int_error_count == 0 ||
	    time_after(jiffies, nic_data->int_error_expire)) {
		nic_data->int_error_count = 0;
		nic_data->int_error_expire =
			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
	}
	if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) {
		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
			"NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}
out:
	return IRQ_HANDLED;
}
/* Handle a legacy interrupt from Falcon
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of any interrupting queues */
	efx_for_each_channel(channel, efx) {
		if ((queues & 1) ||
		    falcon_event_present(
			    falcon_event(channel, channel->eventq_read_ptr))) {
			efx_schedule_channel(channel);
			result = IRQ_HANDLED;
		}
		queues >>= 1;
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	struct efx_channel *channel;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt. If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
			  raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule processing of any interrupting queues */
	channel = &efx->channel[0];
	while (queues) {
		if (queues & 0x01)
			efx_schedule_channel(channel);
		channel++;
		queues >>= 1;
	}

	return IRQ_HANDLED;
}
/* Handle an MSI interrupt from Falcon
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
static void falcon_setup_rss_indir_table(struct efx_nic *efx)
{
	int i = 0;
	unsigned long offset;
	efx_dword_t dword;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	for (offset = FR_BZ_RX_INDIRECTION_TBL;
	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
	     offset += 0x10) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     i % efx->n_rx_queues);
		efx_writed(efx, &dword, offset);
		i++;
	}
}
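
/* Worked example (editor's illustration): the loop covers 0x800 bytes of
 * register space in 0x10-byte steps, i.e. 128 table entries; with
 * n_rx_queues == 4, hash values map to RX queues 0,1,2,3,0,1,...
 * round-robin across the whole table. */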
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int falcon_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (falcon_rev(efx) >= FALCON_REV_B0)
			handler = falcon_legacy_interrupt_b0;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
				efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, falcon_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 channel->name, channel);
		if (rc) {
			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
 fail1:
	return rc;
}
void falcon_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}
/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals. Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
int falcon_spi_cmd(const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	struct efx_nic *efx = spi->efx;
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;
	BUG_ON(!mutex_is_locked(&efx->spi_lock));

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}
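
/* Usage sketch (editor's illustration; a call of this shape appears in
 * falcon_spi_wait_write() below): with efx->spi_lock held,
 *
 *	u8 status;
 *	rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
 *			    &status, sizeof(status));
 *
 * reads the device's status register; passing address -1 means "no
 * address phase" for this command. */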
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}
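
/* Worked example (editor's illustration): for a device with 32-byte
 * write blocks and start == 0x25, the distance to the next block
 * boundary is 32 - (0x25 & 31) = 27 bytes, and the result is
 * min(16, 27) = 16 since FALCON_SPI_MAX_LEN is sizeof(efx_oword_t) == 16;
 * a single write therefore never crosses a block boundary. */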
static inline u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}
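
/* Worked example (editor's illustration): small AT25-style EEPROMs with a
 * 9-bit address space carry address bit 8 in bit 3 of the opcode and set
 * munge_address to 1; for address 0x123, ((0x123 >> 8) & 1) << 3 == 0x08
 * is OR'd into the command byte. Devices with full address bytes leave
 * munge_address at 0, making this a no-op. */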
/* Wait up to 10 ms for buffered write completion */
int falcon_spi_wait_write(const struct efx_spi_device *spi)
{
	struct efx_nic *efx = spi->efx;
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "SPI write timeout on device %d"
				" last status=0x%02x\n",
				spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
		    size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
		     size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(spi);
		if (rc)
			break;

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */
static int falcon_reset_macs(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	if (falcon_rev(efx) < FALCON_REV_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		if (!EFX_IS10G(efx)) {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);

			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);
			return 0;
		} else {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
			efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

			for (count = 0; count < 10000; count++) {
				efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
				if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
				    0)
					return 0;
				udelay(10);
			}

			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
			return -ETIMEDOUT;
		}
	}

	/* MAC stats will fail whilst the TX fifo is draining. Serialise
	 * the drain sequence with the statistics fetch */
	efx_stats_disable(efx);

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
				count);
			break;
		}
		if (count > 20) {
			EFX_ERR(efx, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	efx_stats_enable(efx);

	/* If we've reset the EM block and the link is up, then
	 * we'll have to kick the XAUI link so the PHY can recover */
	if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
		falcon_reset_xaui(efx);

	return 0;
}
void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

	if ((falcon_rev(efx) < FALCON_REV_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}
void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	/* Isolate the MAC -> RX */
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);

	if (!efx->link_up)
		falcon_drain_tx_fifo(efx);
}
void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;
	int link_speed;
	bool tx_fc;

	switch (efx->link_speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised. Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !efx->link_up);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_set_multicast_hash(efx);

	/* Transmission of pause frames when RX crosses the threshold is
	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
	 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
	tx_fc = !!(efx->link_fc & EFX_FC_TX);
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc);

	/* Unisolate the MAC -> RX */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
2021 int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
2027 if (disable_dma_stats)
2030 /* Statistics fetch will fail if the MAC is in TX drain */
2031 if (falcon_rev(efx) >= FALCON_REV_B0) {
2033 efx_reado(efx, &temp, FR_AB_MAC_CTRL);
2034 if (EFX_OWORD_FIELD(temp, FRF_BB_TXFIFO_DRAIN_EN))
2038 dma_done = (efx->stats_buffer.addr + done_offset);
2039 *dma_done = FALCON_STATS_NOT_DONE;
2040 wmb(); /* ensure done flag is clear */
2042 /* Initiate DMA transfer of stats */
2043 EFX_POPULATE_OWORD_2(reg,
2044 FRF_AB_MAC_STAT_DMA_CMD, 1,
2045 FRF_AB_MAC_STAT_DMA_ADR,
2046 efx->stats_buffer.dma_addr);
2047 efx_writeo(efx, ®, FR_AB_MAC_STAT_DMA);
2049 /* Wait for transfer to complete */
2050 for (i = 0; i < 400; i++) {
2051 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
2052 rmb(); /* Ensure the stats are valid. */
2058 EFX_ERR(efx, "timed out waiting for statistics\n");
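/* Sketch of the stats handshake implemented above: the driver clears
 * the completion word to FALCON_STATS_NOT_DONE, uses wmb() so the
 * clear is visible before the DMA command is issued, then polls until
 * the NIC rewrites the word as FALCON_STATS_DONE; the rmb() ensures
 * the statistics body is only read after the done flag is seen. */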
2062 /**************************************************************************
2064 * PHY access via GMII
2066 **************************************************************************
2069 /* Wait for GMII access to complete */
2070 static int falcon_gmii_wait(struct efx_nic *efx)
2072 efx_dword_t md_stat;
2075 /* wait up to 50ms - maximum taken from the datasheet */
2076 for (count = 0; count < 5000; count++) {
2077 efx_readd(efx, &md_stat, FR_AB_MD_STAT);
2078 if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
2079 if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
2080 EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
2081 EFX_ERR(efx, "error from GMII access "
2083 EFX_DWORD_VAL(md_stat));
2090 EFX_ERR(efx, "timed out waiting for GMII\n");
2094 /* Write an MDIO register of a PHY connected to Falcon. */
2095 static int falcon_mdio_write(struct net_device *net_dev,
2096 int prtad, int devad, u16 addr, u16 value)
2098 struct efx_nic *efx = netdev_priv(net_dev);
2102 EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
2103 prtad, devad, addr, value);
2105 spin_lock_bh(&efx->phy_lock);
2107 /* Check MDIO not currently being accessed */
2108 rc = falcon_gmii_wait(efx);
2112 /* Write the address/ID register */
2113 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2114 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2116 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2117 FRF_AB_MD_DEV_ADR, devad);
2118 efx_writeo(efx, &reg, FR_AB_MD_ID);
2121 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
2122 efx_writeo(efx, &reg, FR_AB_MD_TXD);
2124 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_WRC, 1, FRF_AB_MD_GC, 0);
2127 efx_writeo(efx, &reg, FR_AB_MD_CS);
2129 /* Wait for data to be written */
2130 rc = falcon_gmii_wait(efx);
2132 /* Abort the write operation */
2133 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_WRC, 0, FRF_AB_MD_GC, 1);
2136 efx_writeo(efx, &reg, FR_AB_MD_CS);
2141 spin_unlock_bh(&efx->phy_lock);
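/* Summary of the MDIO write sequence above, for reference:
 *   1. falcon_gmii_wait()  - wait for FRF_AB_MD_BSY to clear
 *   2. FR_AB_MD_PHY_ADR    - latch the 16-bit register address
 *   3. FR_AB_MD_ID         - select port (prtad) and device (devad)
 *   4. FR_AB_MD_TXD        - load the data word
 *   5. FR_AB_MD_CS         - pulse the write command, then wait again,
 *      aborting via the MD_GC bit if the wait fails. */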
2145 /* Read an MDIO register of a PHY connected to Falcon. */
2146 static int falcon_mdio_read(struct net_device *net_dev,
2147 int prtad, int devad, u16 addr)
2149 struct efx_nic *efx = netdev_priv(net_dev);
2153 spin_lock_bh(&efx->phy_lock);
2155 /* Check MDIO not currently being accessed */
2156 rc = falcon_gmii_wait(efx);
2160 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2161 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2163 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2164 FRF_AB_MD_DEV_ADR, devad);
2165 efx_writeo(efx, &reg, FR_AB_MD_ID);
2167 /* Request data to be read */
2168 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
2169 efx_writeo(efx, &reg, FR_AB_MD_CS);
2171 /* Wait for data to become available */
2172 rc = falcon_gmii_wait(efx);
2174 efx_reado(efx, &reg, FR_AB_MD_RXD);
2175 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
2176 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
2177 prtad, devad, addr, rc);
2179 /* Abort the read operation */
2180 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RIC, 0, FRF_AB_MD_GC, 1);
2183 efx_writeo(efx, &reg, FR_AB_MD_CS);
2185 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
2186 prtad, devad, addr, rc);
2190 spin_unlock_bh(&efx->phy_lock);
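/* falcon_mdio_read() mirrors the write sequence above, with MD_RDC
 * requesting the transfer and MD_RXD returning the data. Both helpers
 * are handed to the kernel's mdio45 layer via efx->mdio in
 * falcon_probe_port() below. */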
2194 static int falcon_probe_phy(struct efx_nic *efx)
2196 switch (efx->phy_type) {
2197 case PHY_TYPE_SFX7101:
2198 efx->phy_op = &falcon_sfx7101_phy_ops;
2200 case PHY_TYPE_SFT9001A:
2201 case PHY_TYPE_SFT9001B:
2202 efx->phy_op = &falcon_sft9001_phy_ops;
2204 case PHY_TYPE_QT2022C2:
2205 case PHY_TYPE_QT2025C:
2206 efx->phy_op = &falcon_xfp_phy_ops;
2209 EFX_ERR(efx, "Unknown PHY type %d\n",
2214 if (efx->phy_op->macs & EFX_XMAC)
2215 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2216 (1 << LOOPBACK_XGXS) |
2217 (1 << LOOPBACK_XAUI));
2218 if (efx->phy_op->macs & EFX_GMAC)
2219 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2220 efx->loopback_modes |= efx->phy_op->loopbacks;
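/* efx->loopback_modes is a bitmask indexed by the LOOPBACK_* enum: a
 * PHY driving the 10G XMAC picks up the XGMII/XGXS/XAUI MAC-level
 * loopbacks in addition to the PHY's own modes, while a PHY on the 1G
 * GMAC only adds LOOPBACK_GMAC. */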
2225 int falcon_switch_mac(struct efx_nic *efx)
2227 struct efx_mac_operations *old_mac_op = efx->mac_op;
2228 efx_oword_t nic_stat;
2232 /* Don't try to fetch MAC stats while we're switching MACs */
2233 efx_stats_disable(efx);
2235 /* Internal loopbacks override the phy speed setting */
2236 if (efx->loopback_mode == LOOPBACK_GMAC) {
2237 efx->link_speed = 1000;
2238 efx->link_fd = true;
2239 } else if (LOOPBACK_INTERNAL(efx)) {
2240 efx->link_speed = 10000;
2241 efx->link_fd = true;
2244 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2245 efx->mac_op = (EFX_IS10G(efx) ?
2246 &falcon_xmac_operations : &falcon_gmac_operations);
2248 /* Always push the NIC_STAT_REG setting even if the mac hasn't
2249 * changed, because this function is run post online reset */
2250 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2251 strap_val = EFX_IS10G(efx) ? 5 : 3;
2252 if (falcon_rev(efx) >= FALCON_REV_B0) {
2253 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
2254 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
2255 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
2257 /* Falcon A1 does not support 1G/10G speed switching
2258 * and must not be used with a PHY that does. */
2259 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) != strap_val);
2263 if (old_mac_op == efx->mac_op)
2266 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
2267 /* Not all MACs support a MAC-level link state */
2270 rc = falcon_reset_macs(efx);
2272 efx_stats_enable(efx);
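/* The EE_STRAP value pushed above (5 for the 10G XMAC, 3 for the 1G
 * GMAC) presumably re-straps the NIC for the selected MAC block; on
 * Falcon A1 the strap pins are fixed, hence the BUG_ON when the PHY
 * asks for a speed the board was not strapped for. */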
2276 /* This call is responsible for hooking in the MAC and PHY operations */
2277 int falcon_probe_port(struct efx_nic *efx)
2281 /* Hook in PHY operations table */
2282 rc = falcon_probe_phy(efx);
2286 /* Set up MDIO structure for PHY */
2287 efx->mdio.mmds = efx->phy_op->mmds;
2288 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
2289 efx->mdio.mdio_read = falcon_mdio_read;
2290 efx->mdio.mdio_write = falcon_mdio_write;
2292 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2293 if (falcon_rev(efx) >= FALCON_REV_B0)
2294 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
2296 efx->wanted_fc = EFX_FC_RX;
2298 /* Allocate buffer for stats */
2299 rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
2300 FALCON_MAC_STATS_SIZE);
2303 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
2304 (u64)efx->stats_buffer.dma_addr,
2305 efx->stats_buffer.addr,
2306 (u64)virt_to_phys(efx->stats_buffer.addr));
2311 void falcon_remove_port(struct efx_nic *efx)
2313 falcon_free_buffer(efx, &efx->stats_buffer);
2316 /**************************************************************************
2318 * Multicast filtering
2320 **************************************************************************
2323 void falcon_set_multicast_hash(struct efx_nic *efx)
2325 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2327 /* Broadcast packets go through the multicast hash filter.
2328 * ether_crc_le() of the broadcast address is 0xbe2612ff
2329 * so we always add bit 0xff to the mask.
2331 set_bit_le(0xff, mc_hash->byte);
2333 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
2334 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
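/* The hash bit index is taken from the low 8 bits of ether_crc_le()
 * of the destination address, selecting one of 256 bits spread across
 * these two 128-bit registers. For the broadcast address the CRC is
 * 0xbe2612ff and 0xbe2612ff & 0xff == 0xff, which is why bit 0xff is
 * forced on above. */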
2338 /**************************************************************************
2342 **************************************************************************/
2344 int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2346 struct falcon_nvconfig *nvconfig;
2347 struct efx_spi_device *spi;
2349 int rc, magic_num, struct_ver;
2350 __le16 *word, *limit;
2353 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
2357 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
2360 nvconfig = region + FALCON_NVCONFIG_OFFSET;
2362 mutex_lock(&efx->spi_lock);
2363 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
2364 mutex_unlock(&efx->spi_lock);
2366 EFX_ERR(efx, "Failed to read %s\n",
2367 efx->spi_flash ? "flash" : "EEPROM");
2372 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2373 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2376 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
2377 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2380 if (struct_ver < 2) {
2381 EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
2383 } else if (struct_ver < 4) {
2384 word = &nvconfig->board_magic_num;
2385 limit = (__le16 *) (nvconfig + 1);
2388 limit = region + FALCON_NVCONFIG_END;
2390 for (csum = 0; word < limit; ++word)
2391 csum += le16_to_cpu(*word);
2393 if (~csum & 0xffff) {
2394 EFX_ERR(efx, "NVRAM has incorrect checksum\n");
2400 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
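/* The NVRAM checksum convention used above: the 16-bit words of the
 * structure (v2/v3) or of the whole region (v4+) must sum to 0xffff
 * modulo 2^16, so "~csum & 0xffff" is non-zero exactly when the image
 * is corrupt. E.g. a two-word image { 0x1234, 0xedcb } sums to 0xffff
 * and passes. */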
2407 /* Registers tested in the falcon register test */
2411 } efx_test_registers[] = {
2413 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2415 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
2417 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
2418 { FR_AZ_TX_RESERVED,
2419 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
2421 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
2422 { FR_AZ_SRM_TX_DC_CFG,
2423 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
2425 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
2426 { FR_AZ_RX_DC_PF_WM,
2427 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2429 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2431 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
2433 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
2435 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2437 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
2439 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
2440 { FR_AB_XM_RX_PARAM,
2441 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
2443 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
2445 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
2447 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
2450 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
2451 const efx_oword_t *mask)
2453 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
2454 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
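/* i.e. the helper above reports a mismatch iff a and b differ in any
 * bit position covered by the mask: the XOR exposes differing bits and
 * the AND discards bits the test does not claim are writable. */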
2457 int falcon_test_registers(struct efx_nic *efx)
2459 unsigned address = 0, i, j;
2460 efx_oword_t mask, imask, original, reg, buf;
2462 /* Falcon should be in loopback to isolate the XMAC from the PHY */
2463 WARN_ON(!LOOPBACK_INTERNAL(efx));
2465 for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
2466 address = efx_test_registers[i].address;
2467 mask = imask = efx_test_registers[i].mask;
2468 EFX_INVERT_OWORD(imask);
2470 efx_reado(efx, &original, address);
2472 /* bit sweep on and off */
2473 for (j = 0; j < 128; j++) {
2474 if (!EFX_EXTRACT_OWORD32(mask, j, j))
2477 /* Test this testable bit can be set in isolation */
2478 EFX_AND_OWORD(reg, original, mask);
2479 EFX_SET_OWORD32(reg, j, j, 1);
2481 efx_writeo(efx, &reg, address);
2482 efx_reado(efx, &buf, address);
2484 if (efx_masked_compare_oword(&reg, &buf, &mask))
2487 /* Test this testable bit can be cleared in isolation */
2488 EFX_OR_OWORD(reg, original, mask);
2489 EFX_SET_OWORD32(reg, j, j, 0);
2491 efx_writeo(efx, &reg, address);
2492 efx_reado(efx, &buf, address);
2494 if (efx_masked_compare_oword(&reg, &buf, &mask))
2498 efx_writeo(efx, &original, address);
2504 EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
2505 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
2506 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
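/* The loop above walks a single 1 and then a single 0 through each of
 * the 128 bits of every listed register, skipping bits the mask marks
 * untestable, and verifies each bit can be set and cleared in
 * isolation before restoring the original register value. */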
2510 /**************************************************************************
2514 **************************************************************************
2517 /* Resets NIC to known state. This routine must be called in process
2518 * context and is allowed to sleep. */
2519 int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2521 struct falcon_nic_data *nic_data = efx->nic_data;
2522 efx_oword_t glb_ctl_reg_ker;
2525 EFX_LOG(efx, "performing hardware reset (%d)\n", method);
2527 /* Initiate device reset */
2528 if (method == RESET_TYPE_WORLD) {
2529 rc = pci_save_state(efx->pci_dev);
2531 EFX_ERR(efx, "failed to backup PCI state of primary "
2532 "function prior to hardware reset\n");
2535 if (FALCON_IS_DUAL_FUNC(efx)) {
2536 rc = pci_save_state(nic_data->pci_dev2);
2538 EFX_ERR(efx, "failed to backup PCI state of "
2539 "secondary function prior to "
2540 "hardware reset\n");
2545 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2546 FRF_AB_EXT_PHY_RST_DUR,
2547 FFE_AB_EXT_PHY_RST_DUR_10240US, FRF_AB_SWRST, 1);
2550 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2551 /* exclude PHY from "invisible" reset */
2552 FRF_AB_EXT_PHY_RST_CTL,
2553 method == RESET_TYPE_INVISIBLE,
2554 /* exclude EEPROM/flash and PCIe */
2555 FRF_AB_PCIE_CORE_RST_CTL, 1,
2556 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
2557 FRF_AB_PCIE_SD_RST_CTL, 1,
2558 FRF_AB_EE_RST_CTL, 1,
2559 FRF_AB_EXT_PHY_RST_DUR,
2560 FFE_AB_EXT_PHY_RST_DUR_10240US, FRF_AB_SWRST, 1);
2563 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2565 EFX_LOG(efx, "waiting for hardware reset\n");
2566 schedule_timeout_uninterruptible(HZ / 20);
2568 /* Restore PCI configuration if needed */
2569 if (method == RESET_TYPE_WORLD) {
2570 if (FALCON_IS_DUAL_FUNC(efx)) {
2571 rc = pci_restore_state(nic_data->pci_dev2);
2573 EFX_ERR(efx, "failed to restore PCI config for "
2574 "the secondary function\n");
2578 rc = pci_restore_state(efx->pci_dev);
2580 EFX_ERR(efx, "failed to restore PCI config for the "
2581 "primary function\n");
2584 EFX_LOG(efx, "successfully restored PCI config\n");
2587 /* Assert that the reset has completed */
2588 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2589 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
2591 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2594 EFX_LOG(efx, "hardware reset complete\n");
2598 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
2601 pci_restore_state(efx->pci_dev);
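/* Reset scope, as coded above (summary): RESET_TYPE_WORLD saves and
 * restores PCI config space around a full-chip SWRST, including the
 * secondary function on dual-function boards; the other methods keep
 * PCIe, EEPROM/flash and (for RESET_TYPE_INVISIBLE) the external PHY
 * out of the reset via the *_RST_CTL exclusion bits. */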
2608 /* Zeroes out the SRAM contents. This routine must be called in
2609 * process context and is allowed to sleep.
2611 static int falcon_reset_sram(struct efx_nic *efx)
2613 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2616 /* Set the SRAM wake/sleep GPIO appropriately. */
2617 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2618 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2619 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
2620 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2622 /* Initiate SRAM reset */
2623 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2624 FRF_AZ_SRM_INIT_EN, 1,
2625 FRF_AZ_SRM_NB_SZ, 0);
2626 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2628 /* Wait for SRAM reset to complete */
2631 EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
2633 /* SRAM reset is slow; expect around 16ms */
2634 schedule_timeout_uninterruptible(HZ / 50);
2636 /* Check for reset complete */
2637 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2638 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2639 EFX_LOG(efx, "SRAM reset complete\n");
2643 } while (++count < 20); /* wait up to 0.4 sec */
2645 EFX_ERR(efx, "timed out waiting for SRAM reset\n");
2649 static int falcon_spi_device_init(struct efx_nic *efx,
2650 struct efx_spi_device **spi_device_ret,
2651 unsigned int device_id, u32 device_type)
2653 struct efx_spi_device *spi_device;
2655 if (device_type != 0) {
2656 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
2659 spi_device->device_id = device_id;
2661 spi_device->size = 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2662 spi_device->addr_len =
2663 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2664 spi_device->munge_address = (spi_device->size == 1 << 9 &&
2665 spi_device->addr_len == 1);
2666 spi_device->erase_command =
2667 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
2668 spi_device->erase_size =
2669 1 << SPI_DEV_TYPE_FIELD(device_type,
2670 SPI_DEV_TYPE_ERASE_SIZE);
2671 spi_device->block_size =
2672 1 << SPI_DEV_TYPE_FIELD(device_type,
2673 SPI_DEV_TYPE_BLOCK_SIZE);
2675 spi_device->efx = efx;
2680 kfree(*spi_device_ret);
2681 *spi_device_ret = spi_device;
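/* device_type packs the part's geometry into bit fields extracted by
 * SPI_DEV_TYPE_FIELD(). Illustrative decode with made-up values: a
 * size field of 16 would describe a 1 << 16 = 64KB part, and an
 * erase-size field of 12 would give 1 << 12 = 4KB erase blocks. */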
2686 static void falcon_remove_spi_devices(struct efx_nic *efx)
2688 kfree(efx->spi_eeprom);
2689 efx->spi_eeprom = NULL;
2690 kfree(efx->spi_flash);
2691 efx->spi_flash = NULL;
2694 /* Extract non-volatile configuration */
2695 static int falcon_probe_nvconfig(struct efx_nic *efx)
2697 struct falcon_nvconfig *nvconfig;
2701 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
2705 rc = falcon_read_nvram(efx, nvconfig);
2706 if (rc == -EINVAL) {
2707 EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
2708 efx->phy_type = PHY_TYPE_NONE;
2709 efx->mdio.prtad = MDIO_PRTAD_NONE;
2715 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
2716 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
2718 efx->phy_type = v2->port0_phy_type;
2719 efx->mdio.prtad = v2->port0_phy_addr;
2720 board_rev = le16_to_cpu(v2->board_revision);
2722 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2723 rc = falcon_spi_device_init(
2724 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
2725 le32_to_cpu(v3->spi_device_type
2726 [FFE_AB_SPI_DEVICE_FLASH]));
2729 rc = falcon_spi_device_init(
2730 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
2731 le32_to_cpu(v3->spi_device_type
2732 [FFE_AB_SPI_DEVICE_EEPROM]));
2738 /* Read the MAC addresses */
2739 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2741 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
2743 falcon_probe_board(efx, board_rev);
2749 falcon_remove_spi_devices(efx);
2755 /* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
2756 * count, port speed). Set workaround and feature flags accordingly.
2758 static int falcon_probe_nic_variant(struct efx_nic *efx)
2760 efx_oword_t altera_build;
2761 efx_oword_t nic_stat;
2763 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
2764 if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
2765 EFX_ERR(efx, "Falcon FPGA not supported\n");
2769 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2771 switch (falcon_rev(efx)) {
2774 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2778 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
2779 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2788 EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
2792 /* Initial assumed speed */
2793 efx->link_speed = EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) ? 10000 : 1000;
2798 /* Probe all SPI devices on the NIC */
2799 static void falcon_probe_spi_devices(struct efx_nic *efx)
2801 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2804 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
2805 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2806 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2808 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
2809 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
2810 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
2811 EFX_LOG(efx, "Booted from %s\n",
2812 boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
2814 /* Disable VPD and set clock dividers to safe
2815 * values for initial programming. */
2817 EFX_LOG(efx, "Booted from internal ASIC settings;"
2818 " setting SPI config\n");
2819 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2820 /* 125 MHz / 7 ~= 20 MHz */
2821 FRF_AB_EE_SF_CLOCK_DIV, 7,
2822 /* 125 MHz / 63 ~= 2 MHz */
2823 FRF_AB_EE_EE_CLOCK_DIV, 63);
2824 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2827 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
2828 falcon_spi_device_init(efx, &efx->spi_flash,
2829 FFE_AB_SPI_DEVICE_FLASH,
2830 default_flash_type);
2831 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
2832 falcon_spi_device_init(efx, &efx->spi_eeprom,
2833 FFE_AB_SPI_DEVICE_EEPROM, large_eeprom_type);
2837 int falcon_probe_nic(struct efx_nic *efx)
2839 struct falcon_nic_data *nic_data;
2842 /* Allocate storage for hardware specific data */
2843 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2846 efx->nic_data = nic_data;
2848 /* Determine number of ports etc. */
2849 rc = falcon_probe_nic_variant(efx);
2853 /* Probe secondary function if expected */
2854 if (FALCON_IS_DUAL_FUNC(efx)) {
2855 struct pci_dev *dev = pci_dev_get(efx->pci_dev);
2857 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID, dev))) {
2859 if (dev->bus == efx->pci_dev->bus &&
2860 dev->devfn == efx->pci_dev->devfn + 1) {
2861 nic_data->pci_dev2 = dev;
2865 if (!nic_data->pci_dev2) {
2866 EFX_ERR(efx, "failed to find secondary function\n");
2872 /* Now we can reset the NIC */
2873 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
2875 EFX_ERR(efx, "failed to reset NIC\n");
2879 /* Allocate memory for INT_KER */
2880 rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
2883 BUG_ON(efx->irq_status.dma_addr & 0x0f);
2885 EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
2886 (u64)efx->irq_status.dma_addr,
2887 efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
2889 falcon_probe_spi_devices(efx);
2891 /* Read in the non-volatile configuration */
2892 rc = falcon_probe_nvconfig(efx);
2896 /* Initialise I2C adapter */
2897 efx->i2c_adap.owner = THIS_MODULE;
2898 nic_data->i2c_data = falcon_i2c_bit_operations;
2899 nic_data->i2c_data.data = efx;
2900 efx->i2c_adap.algo_data = &nic_data->i2c_data;
2901 efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
2902 strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
2903 rc = i2c_bit_add_bus(&efx->i2c_adap);
2910 falcon_remove_spi_devices(efx);
2911 falcon_free_buffer(efx, &efx->irq_status);
2914 if (nic_data->pci_dev2) {
2915 pci_dev_put(nic_data->pci_dev2);
2916 nic_data->pci_dev2 = NULL;
2920 kfree(efx->nic_data);
2924 static void falcon_init_rx_cfg(struct efx_nic *efx)
2926 /* Prior to Siena the RX DMA engine will split each frame at
2927 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
2928 * be so large that that never happens. */
2929 const unsigned huge_buf_size = (3 * 4096) >> 5;
2930 /* RX control FIFO thresholds (32 entries) */
2931 const unsigned ctrl_xon_thr = 20;
2932 const unsigned ctrl_xoff_thr = 25;
2933 /* RX data FIFO thresholds (256-byte units; size varies) */
2934 int data_xon_thr = rx_xon_thresh_bytes >> 8;
2935 int data_xoff_thr = rx_xoff_thresh_bytes >> 8;
2938 efx_reado(efx, &reg, FR_AZ_RX_CFG);
2939 if (falcon_rev(efx) <= FALCON_REV_A1) {
2940 /* Data FIFO size is 5.5K */
2941 if (data_xon_thr < 0)
2942 data_xon_thr = 512 >> 8;
2943 if (data_xoff_thr < 0)
2944 data_xoff_thr = 2048 >> 8;
2945 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2946 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, huge_buf_size);
2948 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
2949 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
2950 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2951 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
2953 /* Data FIFO size is 80K; register fields moved */
2954 if (data_xon_thr < 0)
2955 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
2956 if (data_xoff_thr < 0)
2957 data_xoff_thr = 54272 >> 8; /* ~80KB - 3*max MTU */
2958 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
2959 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, huge_buf_size);
2961 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
2962 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
2963 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
2964 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
2965 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2967 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
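/* Worked arithmetic for the B0 defaults above, assuming a 9216-byte
 * maximum frame: XON at 27648 = 3 * 9216 bytes, XOFF at 54272 =
 * 81920 (the 80KB FIFO) - 27648 bytes, i.e. pause when less than
 * three max-size frames of headroom remain. The registers hold the
 * values in 256-byte units, hence the >> 8. */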
2970 /* This call performs hardware-specific global initialisation, such as
2971 * defining the descriptor cache sizes and number of RSS channels.
2972 * It does not set up any buffers, descriptor rings or event queues.
2974 int falcon_init_nic(struct efx_nic *efx)
2979 /* Use on-chip SRAM */
2980 efx_reado(efx, &temp, FR_AB_NIC_STAT);
2981 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
2982 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
2984 /* Set the source of the GMAC clock */
2985 if (falcon_rev(efx) == FALCON_REV_B0) {
2986 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
2987 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
2988 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
2991 rc = falcon_reset_sram(efx);
2995 /* Set positions of descriptor caches in SRAM. */
2996 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
2997 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
2998 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
2999 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
3001 /* Set TX descriptor cache size. */
3002 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
3003 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
3004 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
3006 /* Set RX descriptor cache size. Set low watermark to size-8, as
3007 * this allows most efficient prefetching.
3009 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
3010 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
3011 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
3012 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
3013 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
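/* With RX_DC_ENTRIES = 64 the low watermark written above is 64 - 8 =
 * 56: the NIC starts prefetching once the descriptor cache has drained
 * eight entries below full, which the comment above notes is the most
 * efficient point. */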
3015 /* Clear the parity enables on the TX data fifos as
3016 * they produce false parity errors because of timing issues
3018 if (EFX_WORKAROUND_5129(efx)) {
3019 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
3020 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
3021 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
3024 /* Enable all the genuinely fatal interrupts. (They are still
3025 * masked by the overall interrupt mask, controlled by
3026 * falcon_interrupts()).
3028 * Note: All other fatal interrupts are enabled
3030 EFX_POPULATE_OWORD_3(temp,
3031 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
3032 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
3033 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
3034 EFX_INVERT_OWORD(temp);
3035 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
3037 if (EFX_WORKAROUND_7244(efx)) {
3038 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
3039 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
3040 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
3041 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
3042 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
3043 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
3046 falcon_setup_rss_indir_table(efx);
3048 /* XXX This is documented only for Falcon A0/A1 */
3049 /* Setup RX. Wait for descriptor is broken and must
3050 * be disabled. RXDP recovery shouldn't be needed, but is.
3052 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
3053 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
3054 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
3055 if (EFX_WORKAROUND_5583(efx))
3056 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
3057 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
3059 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3060 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3062 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
3063 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
3064 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
3065 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
3066 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
3067 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
3068 /* Enable SW_EV to inherit in char driver - assume harmless here */
3069 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
3070 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
3071 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
3072 /* Squash TX of packets of 16 bytes or less */
3073 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
3074 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
3075 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
3077 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
3078 * descriptors (which is bad).
3080 efx_reado(efx, &temp, FR_AZ_TX_CFG);
3081 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
3082 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
3084 falcon_init_rx_cfg(efx);
3086 /* Set destination of both TX and RX Flush events */
3087 if (falcon_rev(efx) >= FALCON_REV_B0) {
3088 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
3089 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
3095 void falcon_remove_nic(struct efx_nic *efx)
3097 struct falcon_nic_data *nic_data = efx->nic_data;
3100 /* Remove I2C adapter and clear it in preparation for a retry */
3101 rc = i2c_del_adapter(&efx->i2c_adap);
3103 memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap));
3105 falcon_remove_spi_devices(efx);
3106 falcon_free_buffer(efx, &efx->irq_status);
3108 falcon_reset_hw(efx, RESET_TYPE_ALL);
3110 /* Release the second function after the reset */
3111 if (nic_data->pci_dev2) {
3112 pci_dev_put(nic_data->pci_dev2);
3113 nic_data->pci_dev2 = NULL;
3116 /* Tear down the private nic state */
3117 kfree(efx->nic_data);
3118 efx->nic_data = NULL;
3121 void falcon_update_nic_stats(struct efx_nic *efx)
3125 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
3126 efx->n_rx_nodesc_drop_cnt +=
3127 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
3130 /**************************************************************************
3132 * Revision-dependent attributes used by efx.c
3134 **************************************************************************
3137 struct efx_nic_type falcon_a_nic_type = {
3139 .mem_map_size = 0x20000,
3140 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
3141 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
3142 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
3143 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
3144 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
3145 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3146 .rx_buffer_padding = 0x24,
3147 .max_interrupt_mode = EFX_INT_MODE_MSI,
3148 .phys_addr_channels = 4,
3151 struct efx_nic_type falcon_b_nic_type = {
3153 /* Map everything up to and including the RSS indirection
3154 * table. Don't map MSI-X table, MSI-X PBA since Linux
3155 * requires that they not be mapped. */
3156 .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
3157 FR_BZ_RX_INDIRECTION_TBL_STEP *
3158 FR_BZ_RX_INDIRECTION_TBL_ROWS),
3159 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
3160 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
3161 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
3162 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
3163 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
3164 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3165 .rx_buffer_padding = 0,
3166 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3167 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
3168 * interrupt handler only supports 32 channels */