/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/mii.h>
#include "net_driver.h"
#include "workarounds.h"

/* Hardware control for SFC4000 (aka Falcon). */
/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */
/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
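
/* Editorial note: the *_ORDER values appear to be the log2 encoding
 * the hardware expects for the descriptor cache size, in units of 8
 * entries: 8 << 1 == 16 and 8 << 3 == 64, matching the counts above
 * (an inference from the paired values, not from the datasheet). */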
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
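
/* The SIZE, ERASE_SIZE and BLOCK_SIZE fields above are log2 encodings
 * (1 << 13 = 8 KB, 1 << 5 = 32 B, 1 << 17 = 128 KB, 1 << 15 = 32 KB,
 * 1 << 8 = 256 B) and ADDR_LEN is the address length in bytes
 * (2 = 16-bit, 3 = 24-bit), matching the device descriptions in the
 * comments. (Editorial note.) */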
/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this watermark,
 * send XOFF. Only used if RX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
static int rx_xoff_thresh_bytes = -1;
module_param(rx_xoff_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");

/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this watermark,
 * send XON. Only used if TX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
static int rx_xon_thresh_bytes = -1;
module_param(rx_xon_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
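
/* Both thresholds are in bytes; the default of -1 requests the
 * driver's own choice of value. A hypothetical load-time override
 * (the byte values here are purely illustrative) might look like:
 *
 *	modprobe sfc rx_xoff_thresh_bytes=196608 rx_xon_thresh_bytes=131072
 */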
/* If FALCON_MAX_INT_ERRORS internal errors occur within
 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define FALCON_INT_ERROR_EXPIRE 3600
#define FALCON_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define FALCON_FLUSH_INTERVAL 10
#define FALCON_FLUSH_POLL_COUNT 100
/**************************************************************************
 *
 * Falcon constants
 *
 **************************************************************************
 */

/* Size and alignment of special buffers (4KB) */
#define FALCON_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define FALCON_RX_FLUSH_COUNT 4

#define FALCON_IS_DUAL_FUNC(efx)		\
	(efx_nic_rev(efx) < EFX_REV_FALCON_B0)
/**************************************************************************
 *
 * Falcon hardware access
 *
 **************************************************************************/
static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
					unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}
/* Read the current event from the event queue */
static inline efx_qword_t *falcon_event(struct efx_channel *channel,
					unsigned int index)
{
	return (((efx_qword_t *) (channel->eventq.addr)) + index);
}
/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int falcon_event_present(efx_qword_t *event)
{
	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}
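
/* Illustration of the hazard described above (editorial note): if the
 * NIC's 64-bit event write is performed as two 32-bit writes and only
 * the high dword has landed so far, a single 64-bit "not all ones"
 * test would accept the half-written event, whereas the per-dword
 * test above keeps rejecting it until neither half is still in the
 * cleared (all-ones) state. */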
/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
 */
static void falcon_setsda(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}
static void falcon_setscl(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}
static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}
static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}
static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
/**************************************************************************
 *
 * Falcon special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/
/*
 * Initialise a Falcon special buffer
 *
 * This will define a buffer (previously allocated via
 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
falcon_init_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
			index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		falcon_write_buf_tbl(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer from Falcon and clears the buffer table entries */
static void
falcon_fini_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
		buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
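
/* Note that FRF_AZ_BUF_CLR_START_ID/END_ID describe an inclusive ID
 * range, which is why "end" is computed with the -1 above.
 * (Editorial note.) */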
/*
 * Allocate a new Falcon special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into Falcon's buffer table.
 *
 * This call will allocate 4KB buffers, since Falcon can't use 8KB
 * buffers for event queues and descriptor rings.
 */
static int falcon_alloc_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer,
				       unsigned int len)
{
	len = ALIGN(len, FALCON_BUF_SIZE);

	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / FALCON_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
static void falcon_free_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, buffer->len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
			    buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}
/**************************************************************************
 *
 * Falcon generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/
static int falcon_alloc_buffer(struct efx_nic *efx,
			       struct efx_buffer *buffer, unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}
/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/
/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
}
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * then notify the hardware of the new descriptors.
 */
void falcon_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = falcon_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
	falcon_notify_tx_desc(tx_queue);
}
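
/* write_count and insert_count are free-running; only the value
 * masked with EFX_TXQ_MASK is a ring index, so the loop above works
 * correctly across ring wrap-around. (Editorial note.) */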
/* Allocate hardware resources for a TX queue */
int falcon_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
					   EFX_TXQ_SIZE * sizeof(efx_qword_t));
}
void falcon_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	falcon_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
}
static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}
void falcon_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	falcon_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void falcon_remove_tx(struct efx_tx_queue *tx_queue)
{
	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
}
/* This creates an entry in the RX descriptor queue */
static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
					unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = falcon_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor queue.
 */
void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		falcon_build_rx_desc(rx_queue,
				     rx_queue->notified_count &
				     EFX_RXQ_MASK);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(rx_queue->efx, &reg,
			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
}
int falcon_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;

	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
					   EFX_RXQ_SIZE * sizeof(efx_qword_t));
}
void falcon_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
		rx_queue->queue, rx_queue->rxd.index,
		rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	falcon_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      rx_queue->channel->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);
}
static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
void falcon_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);

	/* Unpin RX descriptor ring */
	falcon_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void falcon_remove_rx(struct efx_rx_queue *rx_queue)
{
	falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Falcon event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * read" event.
 */
void falcon_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}
/* Use HW to insert a SW defined event */
void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
/* Handle a transmit completion event
 *
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void falcon_handle_tx_event(struct efx_channel *channel,
				   efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		channel->irq_mod_score +=
			(tx_ev_desc_ptr - tx_queue->read_count) &
			EFX_TXQ_MASK;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT"\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				    const efx_qword_t *event,
				    bool *rx_ev_pkt_ok,
				    bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}
/* Handle receive events that are not in-order. */
static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
				       unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & EFX_RXQ_MASK;
	dropped = (index - expected) & EFX_RXQ_MASK;
	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
		 dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
/* Handle a packet received event
 *
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void falcon_handle_rx_event(struct efx_channel *channel,
				   const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = &efx->rx_queue[channel->channel];

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IPv4 or
		 * UDP/IPv4, then we can rely on the hardware checksum.
		 */
		checksummed =
			likely(efx->rx_checksum_enabled) &&
			(rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
			 rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP);
	} else {
		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
					&discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
/* Global events are basically PHY events */
static void falcon_handle_global_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
		/* Ignored */
		handled = true;
	}

	if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		efx->xmac_poll_required = true;
		handled = true;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		EFX_ERR(efx, "channel %d seen global RX_RESET "
			"event. Resetting.\n", channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		EFX_ERR(efx, "channel %d unknown global event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
}
static void falcon_handle_driver_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		break;
	}
}
int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rx_packets = 0;

	read_ptr = channel->eventq_read_ptr;

	do {
		p_event = falcon_event(channel, read_ptr);
		event = *p_event;

		if (!falcon_event_present(&event))
			/* End of events */
			break;

		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			falcon_handle_rx_event(channel, &event);
			++rx_packets;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			falcon_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			channel->eventq_magic = EFX_QWORD_FIELD(
				event, FSF_AZ_DRV_GEN_EV_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			falcon_handle_global_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			falcon_handle_driver_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;

	} while (rx_packets < rx_quota);

	channel->eventq_read_ptr = read_ptr;
	return rx_packets;
}
void falcon_set_int_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}
/* Allocate buffer table entries for event queue */
int falcon_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
	return falcon_alloc_special_buffer(efx, &channel->eventq,
					   EFX_EVQ_SIZE * sizeof(efx_qword_t));
}
void falcon_init_eventq(struct efx_channel *channel)
{
	efx_oword_t evq_ptr;
	struct efx_nic *efx = channel->efx;

	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
		channel->channel, channel->eventq.index,
		channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	falcon_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(evq_ptr,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	falcon_set_int_moderation(channel);
}
void falcon_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t eventq_ptr;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(eventq_ptr);
	efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	falcon_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void falcon_remove_eventq(struct efx_channel *channel)
{
	falcon_free_special_buffer(channel->efx, &channel->eventq);
}
/* Generates a test event on the event queue.  A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic.
 */
void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
{
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	falcon_generate_event(channel, &test_event);
}
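
/* A minimal usage sketch, based only on the comment above (editorial;
 * 0xdead is an arbitrary illustrative value):
 *
 *	falcon_generate_test_event(channel, 0xdead);
 *	// ... allow the channel's tasklet to run falcon_process_eventq() ...
 *	ok = (channel->eventq_magic == 0xdead);
 *
 * This round trip verifies that events generated by the hardware
 * actually arrive in the host event queue, which is presumably why
 * the driver's self-test exercises it. */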
/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
static void falcon_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;

	do {
		efx_qword_t *event = falcon_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!falcon_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TX_QUEUE_COUNT) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_queues) {
				rx_queue = efx->rx_queue + ev_queue;
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}
static void falcon_prepare_flush(struct efx_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int falcon_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	falcon_prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_tx_queue(tx_queue, efx)
		falcon_flush_tx_queue(tx_queue);

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_rx_queue(rx_queue, efx) {
			if (rx_queue->flushed == FLUSH_PENDING)
				++rx_pending;
		}
		efx_for_each_rx_queue(rx_queue, efx) {
			if (rx_pending == FALCON_RX_FLUSH_COUNT)
				break;
			if (rx_queue->flushed == FLUSH_FAILED ||
			    rx_queue->flushed == FLUSH_NONE) {
				falcon_flush_rx_queue(rx_queue);
				++rx_pending;
			}
		}
		efx_for_each_tx_queue(tx_queue, efx) {
			if (tx_queue->flushed != FLUSH_DONE)
				++tx_pending;
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(FALCON_FLUSH_INTERVAL);
		falcon_poll_flush_events(efx);
	}
	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (tx_queue->flushed != FLUSH_DONE)
			EFX_ERR(efx, "tx queue %d flush command timed out\n",
				tx_queue->queue);
		tx_queue->flushed = FLUSH_DONE;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (rx_queue->flushed != FLUSH_DONE)
			EFX_ERR(efx, "rx queue %d flush command timed out\n",
				rx_queue->queue);
		rx_queue->flushed = FLUSH_DONE;
	}

	if (EFX_WORKAROUND_7803(efx))
		return 0;

	return -ETIMEDOUT;
}
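
/* Worked example of the retry loop above (editorial note): with eight
 * RX queues and FALCON_RX_FLUSH_COUNT == 4, at most four RX flushes
 * are in flight at once; each FALCON_FLUSH_INTERVAL poll retires
 * completed flushes and issues new or retried ones until every queue
 * reports FLUSH_DONE or the FALCON_FLUSH_POLL_COUNT budget runs out. */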
/**************************************************************************
 *
 * Falcon hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/
/* Enable/disable/generate Falcon interrupts */
static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
				     int force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_2(int_en_reg_ker,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}
void falcon_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	falcon_interrupts(efx, 1, 0);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void falcon_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	falcon_interrupts(efx, 0, 0);
}
/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void falcon_generate_interrupt(struct efx_nic *efx)
{
	falcon_interrupts(efx, 1, 1);
}
/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		EFX_OWORD_VAL(fatal_intr),
		error ? "disabling bus mastering" : "no recognised error");
	if (error == 0)
		goto out;

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (FALCON_IS_DUAL_FUNC(efx))
		pci_clear_master(nic_data->pci_dev2);
	falcon_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < FALCON_MAX_INT_ERRORS) {
		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
			"NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}
out:
	return IRQ_HANDLED;
}
/* Handle a legacy interrupt from Falcon
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of any interrupting queues */
	efx_for_each_channel(channel, efx) {
		if ((queues & 1) ||
		    falcon_event_present(
			    falcon_event(channel, channel->eventq_read_ptr))) {
			efx_schedule_channel(channel);
			result = IRQ_HANDLED;
		}
		queues >>= 1;
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	struct efx_channel *channel;
	int syserr;
	u32 queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
			  raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule processing of any interrupting queues */
	channel = &efx->channel[0];
	while (queues) {
		if (queues & 0x01)
			efx_schedule_channel(channel);
		channel++;
		queues >>= 1;
	}

	return IRQ_HANDLED;
}
/* Handle an MSI interrupt from Falcon
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
static void falcon_setup_rss_indir_table(struct efx_nic *efx)
{
	int i = 0;
	unsigned long offset;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	for (offset = FR_BZ_RX_INDIRECTION_TBL;
	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
	     offset += 0x10) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     i % efx->n_rx_queues);
		efx_writed(efx, &dword, offset);
		i++;
	}
}
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int falcon_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = falcon_legacy_interrupt_b0;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
				efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, falcon_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 channel->name, channel);
		if (rc) {
			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
 fail1:
	return rc;
}
void falcon_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;
	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}
/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
int falcon_spi_cmd(const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	struct efx_nic *efx = spi->efx;
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;
	BUG_ON(!mutex_is_locked(&efx->spi_lock));

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}
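
/* Worked example (editorial): with block_size 256 and start 0x1fc the
 * limit is min(16, 256 - 0xfc) = 4 bytes, so a single write never
 * crosses a 256-byte program-page boundary. */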
static u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}
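
/* Editorial note, based on common AT25-style SPI EEPROM conventions:
 * small devices with 9-bit addressing fold address bit A8 into bit 3
 * of the command byte, so such devices set munge_address to 1;
 * devices with full-width addressing use munge_address == 0, leaving
 * the command unchanged. */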
/* Wait up to 10 ms for buffered write completion */
int falcon_spi_wait_write(const struct efx_spi_device *spi)
{
	struct efx_nic *efx = spi->efx;
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "SPI write timeout on device %d"
				" last status=0x%02x\n",
				spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
		    size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
		     size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(spi);
		if (rc)
			break;

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */
static int falcon_reset_macs(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		if (!EFX_IS10G(efx)) {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);

			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);
			return 0;
		} else {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
			efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

			for (count = 0; count < 10000; count++) {
				efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
				if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
				    0)
					return 0;
				udelay(10);
			}

			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
			return -ETIMEDOUT;
		}
	}

	/* MAC stats will fail whilst the TX fifo is draining. Serialise
	 * the drain sequence with the statistics fetch */
	falcon_stop_nic_stats(efx);

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
				count);
			break;
		}
		if (count > 20) {
			EFX_ERR(efx, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* If we've reset the EM block and the link is up, then
	 * we'll have to kick the XAUI link so the PHY can recover */
	if (efx->link_state.up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
		falcon_reset_xaui(efx);

	falcon_start_nic_stats(efx);

	return 0;
}
void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

	if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}
void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);

	if (!efx->link_state.up)
		falcon_drain_tx_fifo(efx);
}
void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	efx_oword_t reg;
	int link_speed;
	bool tx_fc;

	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
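	/* FRF_AB_MAC_SPEED encoding used below, per the switch above:
	 * 0 = 10M, 1 = 100M, 2 = 1G, 3 = 10G. (Editorial note.) */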
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	/* Transmission of pause frames when RX crosses the threshold is
	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
	 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
	tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc);

	/* Unisolate the MAC -> RX */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
static void falcon_stats_request(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	if (nic_data->stats_dma_done == NULL)
		return;	/* no mac selected */

	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
static void falcon_stats_complete(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = 0;
	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		rmb(); /* read the done flag before the stats */
		efx->mac_op->update_stats(efx);
	} else {
		EFX_ERR(efx, "timed out waiting for statistics\n");
	}
}
static void falcon_stats_timer_func(unsigned long context)
{
	struct efx_nic *efx = (struct efx_nic *)context;
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}
static bool falcon_loopback_link_poll(struct efx_nic *efx)
{
	struct efx_link_state old_state = efx->link_state;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	efx->link_state.fd = true;
	efx->link_state.fc = efx->wanted_fc;
	efx->link_state.up = true;

	if (efx->loopback_mode == LOOPBACK_GMAC)
		efx->link_state.speed = 1000;
	else
		efx->link_state.speed = 10000;

	return !efx_link_state_equal(&efx->link_state, &old_state);
}
/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */
2066 /* Wait for GMII access to complete */
2067 static int falcon_gmii_wait(struct efx_nic *efx)
2069 efx_oword_t md_stat;
2070 int count;
2072 /* wait up to 50ms - taken max from datasheet */
2073 for (count = 0; count < 5000; count++) {
2074 efx_reado(efx, &md_stat, FR_AB_MD_STAT);
2075 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
2076 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
2077 EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
2078 EFX_ERR(efx, "error from GMII access "
2079 EFX_OWORD_FMT"\n",
2080 EFX_OWORD_VAL(md_stat));
2081 return -EIO;
2082 }
2083 return 0;
2084 }
2085 udelay(10);
2086 }
2087 EFX_ERR(efx, "timed out waiting for GMII\n");
2088 return -ETIMEDOUT;
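/* Editorial timing note: 5000 polls with a 10 us delay per pass give the
 * 50 ms bound quoted above (5000 * 10 us = 50 ms). */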
2091 /* Write an MDIO register of a PHY connected to Falcon. */
2092 static int falcon_mdio_write(struct net_device *net_dev,
2093 int prtad, int devad, u16 addr, u16 value)
2095 struct efx_nic *efx = netdev_priv(net_dev);
2096 efx_oword_t reg;
2097 int rc;
2099 EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
2100 prtad, devad, addr, value);
2102 mutex_lock(&efx->mdio_lock);
2104 /* Check MDIO not currently being accessed */
2105 rc = falcon_gmii_wait(efx);
2106 if (rc)
2107 goto out;
2109 /* Write the address/ID register */
2110 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2111 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2113 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2114 FRF_AB_MD_DEV_ADR, devad);
2115 efx_writeo(efx, &reg, FR_AB_MD_ID);
2118 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
2119 efx_writeo(efx, &reg, FR_AB_MD_TXD);
2121 EFX_POPULATE_OWORD_2(reg,
2122 FRF_AB_MD_WRC, 1,
2123 FRF_AB_MD_GC, 0);
2124 efx_writeo(efx, &reg, FR_AB_MD_CS);
2126 /* Wait for data to be written */
2127 rc = falcon_gmii_wait(efx);
2128 if (rc) {
2129 /* Abort the write operation */
2130 EFX_POPULATE_OWORD_2(reg,
2131 FRF_AB_MD_WRC, 0,
2132 FRF_AB_MD_GC, 1);
2133 efx_writeo(efx, &reg, FR_AB_MD_CS);
2134 udelay(10);
2135 }
2137 out:
2138 mutex_unlock(&efx->mdio_lock);
2139 return rc;
2142 /* Read an MDIO register of a PHY connected to Falcon. */
2143 static int falcon_mdio_read(struct net_device *net_dev,
2144 int prtad, int devad, u16 addr)
2146 struct efx_nic *efx = netdev_priv(net_dev);
2147 efx_oword_t reg;
2148 int rc;
2150 mutex_lock(&efx->mdio_lock);
2152 /* Check MDIO not currently being accessed */
2153 rc = falcon_gmii_wait(efx);
2154 if (rc)
2155 goto out;
2157 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2158 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2160 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2161 FRF_AB_MD_DEV_ADR, devad);
2162 efx_writeo(efx, &reg, FR_AB_MD_ID);
2164 /* Request data to be read */
2165 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
2166 efx_writeo(efx, &reg, FR_AB_MD_CS);
2168 /* Wait for data to become available */
2169 rc = falcon_gmii_wait(efx);
2170 if (rc == 0) {
2171 efx_reado(efx, &reg, FR_AB_MD_RXD);
2172 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
2173 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
2174 prtad, devad, addr, rc);
2175 } else {
2176 /* Abort the read operation */
2177 EFX_POPULATE_OWORD_2(reg,
2178 FRF_AB_MD_RIC, 0,
2179 FRF_AB_MD_GC, 1);
2180 efx_writeo(efx, &reg, FR_AB_MD_CS);
2182 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
2183 prtad, devad, addr, rc);
2184 }
2186 out:
2187 mutex_unlock(&efx->mdio_lock);
2188 return rc;
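/* Editorial sketch: these two functions are not called directly but through
 * the mdio structure filled in by falcon_probe_port() below.  A typical
 * clause 45 read from a higher layer would look like this (illustrative;
 * MDIO_MMD_PHYXS and MDIO_STAT1 are the standard <linux/mdio.h> constants):
 *
 *	int stat = efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad,
 *				       MDIO_MMD_PHYXS, MDIO_STAT1);
 */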
2191 static void falcon_clock_mac(struct efx_nic *efx)
2193 unsigned strap_val;
2194 efx_oword_t nic_stat;
2196 /* Configure the NIC generated MAC clock correctly */
2197 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2198 strap_val = EFX_IS10G(efx) ? 5 : 3;
2199 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2200 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
2201 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
2202 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
2203 } else {
2204 /* Falcon A1 does not support 1G/10G speed switching
2205 * and must not be used with a PHY that does. */
2206 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
2207 strap_val);
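/* Editorial note, inferred from the EFX_IS10G() selection above: strap
 * value 5 clocks the MAC block for the 10G XMAC and 3 for the 1G GMAC;
 * the authoritative bit meanings live with the FRF_AB_STRAP_* register
 * definitions. */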
2211 int falcon_switch_mac(struct efx_nic *efx)
2213 struct efx_mac_operations *old_mac_op = efx->mac_op;
2214 struct falcon_nic_data *nic_data = efx->nic_data;
2215 unsigned int stats_done_offset;
2216 int rc = 0;
2218 /* Don't try to fetch MAC stats while we're switching MACs */
2219 falcon_stop_nic_stats(efx);
2221 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2222 efx->mac_op = (EFX_IS10G(efx) ?
2223 &falcon_xmac_operations : &falcon_gmac_operations);
2225 if (EFX_IS10G(efx))
2226 stats_done_offset = XgDmaDone_offset;
2227 else
2228 stats_done_offset = GDmaDone_offset;
2229 nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
2231 if (old_mac_op == efx->mac_op)
2232 goto out;
2234 falcon_clock_mac(efx);
2236 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
2237 /* Not all macs support a mac-level link state */
2238 efx->xmac_poll_required = false;
2240 rc = falcon_reset_macs(efx);
2241 out:
2242 falcon_start_nic_stats(efx);
2243 return rc;
2246 /* This call is responsible for hooking in the MAC and PHY operations */
2247 int falcon_probe_port(struct efx_nic *efx)
2251 switch (efx->phy_type) {
2252 case PHY_TYPE_SFX7101:
2253 efx->phy_op = &falcon_sfx7101_phy_ops;
2254 break;
2255 case PHY_TYPE_SFT9001A:
2256 case PHY_TYPE_SFT9001B:
2257 efx->phy_op = &falcon_sft9001_phy_ops;
2258 break;
2259 case PHY_TYPE_QT2022C2:
2260 case PHY_TYPE_QT2025C:
2261 efx->phy_op = &falcon_qt202x_phy_ops;
2262 break;
2263 default:
2264 EFX_ERR(efx, "Unknown PHY type %d\n",
2265 efx->phy_type);
2266 return -ENODEV;
2269 if (efx->phy_op->macs & EFX_XMAC)
2270 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2271 (1 << LOOPBACK_XGXS) |
2272 (1 << LOOPBACK_XAUI));
2273 if (efx->phy_op->macs & EFX_GMAC)
2274 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2275 efx->loopback_modes |= efx->phy_op->loopbacks;
2277 /* Set up MDIO structure for PHY */
2278 efx->mdio.mmds = efx->phy_op->mmds;
2279 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
2280 efx->mdio.mdio_read = falcon_mdio_read;
2281 efx->mdio.mdio_write = falcon_mdio_write;
2283 /* Initial assumption */
2284 efx->link_state.speed = 10000;
2285 efx->link_state.fd = true;
2287 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2288 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
2289 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
2291 efx->wanted_fc = EFX_FC_RX;
2293 /* Allocate buffer for stats */
2294 rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
2295 FALCON_MAC_STATS_SIZE);
2296 if (rc)
2297 return rc;
2298 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
2299 (u64)efx->stats_buffer.dma_addr,
2300 efx->stats_buffer.addr,
2301 (u64)virt_to_phys(efx->stats_buffer.addr));
2303 return 0;
2306 void falcon_remove_port(struct efx_nic *efx)
2308 falcon_free_buffer(efx, &efx->stats_buffer);
2311 /**************************************************************************
2313 * Multicast filtering
2315 **************************************************************************
2318 void falcon_push_multicast_hash(struct efx_nic *efx)
2320 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2322 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2324 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
2325 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
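/* Editorial sketch: how a caller typically sets a bucket in the 256-bit
 * hash that this function pushes, assuming the usual CRC32-based filter
 * used by the efx core (illustrative only):
 *
 *	u32 crc = ether_crc(ETH_ALEN, mc_addr);	// <linux/crc32.h>
 *	unsigned int bit = crc & 0xff;		// 256 one-bit buckets
 *	set_bit_le(bit, mc_hash->byte);		// byte[] view of the owords
 */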
2329 /**************************************************************************
2333 **************************************************************************/
2335 int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2337 struct falcon_nvconfig *nvconfig;
2338 struct efx_spi_device *spi;
2339 void *region;
2340 int rc, magic_num, struct_ver;
2341 __le16 *word, *limit;
2342 u32 csum;
2344 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
2345 if (!spi)
2346 return -EINVAL;
2348 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
2349 if (!region)
2350 return -ENOMEM;
2351 nvconfig = region + FALCON_NVCONFIG_OFFSET;
2353 mutex_lock(&efx->spi_lock);
2354 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
2355 mutex_unlock(&efx->spi_lock);
2356 if (rc) {
2357 EFX_ERR(efx, "Failed to read %s\n",
2358 efx->spi_flash ? "flash" : "EEPROM");
2359 rc = -EIO;
2360 goto out;
2361 }
2363 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2364 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2367 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
2368 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2371 if (struct_ver < 2) {
2372 EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
2374 } else if (struct_ver < 4) {
2375 word = &nvconfig->board_magic_num;
2376 limit = (__le16 *) (nvconfig + 1);
2377 } else {
2378 word = region;
2379 limit = region + FALCON_NVCONFIG_END;
2381 for (csum = 0; word < limit; ++word)
2382 csum += le16_to_cpu(*word);
2384 if (~csum & 0xffff) {
2385 EFX_ERR(efx, "NVRAM has incorrect checksum\n");
2386 rc = -EIO;
2387 goto out;
2388 }
2391 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
2392 rc = 0;
2394 out:
2395 kfree(region);
2396 return rc;
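/* Editorial note on the checksum arithmetic: the region is accepted when
 * its 16-bit words (including the stored checksum word) sum to 0xffff in
 * the low 16 bits, which is exactly what "~csum & 0xffff" tests.  For
 * example, if the other words sum to 0x1234, a stored checksum word of
 * 0xedcb brings the low 16 bits of the total to 0xffff. */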
2398 /* Registers tested in the falcon register test */
2399 static struct {
2400 unsigned address;
2401 efx_oword_t mask;
2402 } efx_test_registers[] = {
2403 { FR_AZ_ADR_REGION,
2404 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2405 { FR_AZ_RX_CFG,
2406 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
2407 { FR_AZ_TX_CFG,
2408 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
2409 { FR_AZ_TX_RESERVED,
2410 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
2411 { FR_AB_MAC_CTRL,
2412 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
2413 { FR_AZ_SRM_TX_DC_CFG,
2414 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
2415 { FR_AZ_RX_DC_CFG,
2416 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
2417 { FR_AZ_RX_DC_PF_WM,
2418 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2419 { FR_BZ_DP_CTRL,
2420 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2421 { FR_AB_GM_CFG2,
2422 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
2423 { FR_AB_GMF_CFG0,
2424 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
2425 { FR_AB_XM_GLB_CFG,
2426 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2427 { FR_AB_XM_TX_CFG,
2428 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
2429 { FR_AB_XM_RX_CFG,
2430 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
2431 { FR_AB_XM_RX_PARAM,
2432 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
2433 { FR_AB_XM_FC,
2434 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
2435 { FR_AB_XM_ADR_LO,
2436 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
2437 { FR_AZ_XX_SD_CTL,
2438 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
2439 };
2441 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
2442 const efx_oword_t *mask)
2444 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
2445 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
2448 int falcon_test_registers(struct efx_nic *efx)
2450 unsigned address = 0, i, j;
2451 efx_oword_t mask, imask, original, reg, buf;
2453 /* Falcon should be in loopback to isolate the XMAC from the PHY */
2454 WARN_ON(!LOOPBACK_INTERNAL(efx));
2456 for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
2457 address = efx_test_registers[i].address;
2458 mask = imask = efx_test_registers[i].mask;
2459 EFX_INVERT_OWORD(imask);
2461 efx_reado(efx, &original, address);
2463 /* bit sweep on and off */
2464 for (j = 0; j < 128; j++) {
2465 if (!EFX_EXTRACT_OWORD32(mask, j, j))
2466 continue;
2468 /* Test this testable bit can be set in isolation */
2469 EFX_AND_OWORD(reg, original, mask);
2470 EFX_SET_OWORD32(reg, j, j, 1);
2472 efx_writeo(efx, &reg, address);
2473 efx_reado(efx, &buf, address);
2475 if (efx_masked_compare_oword(&reg, &buf, &mask))
2476 goto fail;
2478 /* Test this testable bit can be cleared in isolation */
2479 EFX_OR_OWORD(reg, original, mask);
2480 EFX_SET_OWORD32(reg, j, j, 0);
2482 efx_writeo(efx, &reg, address);
2483 efx_reado(efx, &buf, address);
2485 if (efx_masked_compare_oword(&reg, &buf, &mask))
2486 goto fail;
2489 efx_writeo(efx, &original, address);
2490 }
2492 return 0;
2494 fail:
2495 EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
2496 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
2497 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
2498 return -EIO;
2501 /**************************************************************************
2505 **************************************************************************
2508 /* Resets NIC to known state. This routine must be called in process
2509 * context and is allowed to sleep. */
2510 int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2512 struct falcon_nic_data *nic_data = efx->nic_data;
2513 efx_oword_t glb_ctl_reg_ker;
2514 int rc;
2516 EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
2518 /* Initiate device reset */
2519 if (method == RESET_TYPE_WORLD) {
2520 rc = pci_save_state(efx->pci_dev);
2521 if (rc) {
2522 EFX_ERR(efx, "failed to backup PCI state of primary "
2523 "function prior to hardware reset\n");
2526 if (FALCON_IS_DUAL_FUNC(efx)) {
2527 rc = pci_save_state(nic_data->pci_dev2);
2528 if (rc) {
2529 EFX_ERR(efx, "failed to backup PCI state of "
2530 "secondary function prior to "
2531 "hardware reset\n");
2536 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2537 FRF_AB_EXT_PHY_RST_DUR,
2538 FFE_AB_EXT_PHY_RST_DUR_10240US,
2539 FRF_AB_SWRST, 1);
2540 } else {
2541 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2542 /* exclude PHY from "invisible" reset */
2543 FRF_AB_EXT_PHY_RST_CTL,
2544 method == RESET_TYPE_INVISIBLE,
2545 /* exclude EEPROM/flash and PCIe */
2546 FRF_AB_PCIE_CORE_RST_CTL, 1,
2547 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
2548 FRF_AB_PCIE_SD_RST_CTL, 1,
2549 FRF_AB_EE_RST_CTL, 1,
2550 FRF_AB_EXT_PHY_RST_DUR,
2551 FFE_AB_EXT_PHY_RST_DUR_10240US,
2552 FRF_AB_SWRST, 1);
2553 }
2554 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2556 EFX_LOG(efx, "waiting for hardware reset\n");
2557 schedule_timeout_uninterruptible(HZ / 20);
2559 /* Restore PCI configuration if needed */
2560 if (method == RESET_TYPE_WORLD) {
2561 if (FALCON_IS_DUAL_FUNC(efx)) {
2562 rc = pci_restore_state(nic_data->pci_dev2);
2563 if (rc) {
2564 EFX_ERR(efx, "failed to restore PCI config for "
2565 "the secondary function\n");
2569 rc = pci_restore_state(efx->pci_dev);
2570 if (rc) {
2571 EFX_ERR(efx, "failed to restore PCI config for the "
2572 "primary function\n");
2575 EFX_LOG(efx, "successfully restored PCI config\n");
2578 /* Assert that reset complete */
2579 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2580 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
2581 rc = -ETIMEDOUT;
2582 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2583 goto fail5;
2585 EFX_LOG(efx, "hardware reset complete\n");
2587 return 0;
2589 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
2590 fail2:
2591 fail3:
2592 pci_restore_state(efx->pci_dev);
2593 fail1:
2594 fail4:
2595 fail5:
2596 return rc;
2599 void falcon_monitor(struct efx_nic *efx)
2601 bool link_changed;
2602 int rc;
2604 BUG_ON(!mutex_is_locked(&efx->mac_lock));
2606 rc = falcon_board(efx)->type->monitor(efx);
2607 if (rc) {
2608 EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
2609 (rc == -ERANGE) ? "reported fault" : "failed");
2610 efx->phy_mode |= PHY_MODE_LOW_POWER;
2611 __efx_reconfigure_port(efx);
2612 }
2614 if (LOOPBACK_INTERNAL(efx))
2615 link_changed = falcon_loopback_link_poll(efx);
2616 else
2617 link_changed = efx->phy_op->poll(efx);
2619 if (link_changed) {
2620 falcon_stop_nic_stats(efx);
2621 falcon_deconfigure_mac_wrapper(efx);
2623 falcon_switch_mac(efx);
2624 efx->mac_op->reconfigure(efx);
2626 falcon_start_nic_stats(efx);
2628 efx_link_status_changed(efx);
2629 }
2631 if (EFX_IS10G(efx))
2632 falcon_poll_xmac(efx);
2635 /* Zeroes out the SRAM contents. This routine must be called in
2636 * process context and is allowed to sleep.
2638 static int falcon_reset_sram(struct efx_nic *efx)
2640 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2643 /* Set the SRAM wake/sleep GPIO appropriately. */
2644 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2645 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2646 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
2647 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2649 /* Initiate SRAM reset */
2650 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2651 FRF_AZ_SRM_INIT_EN, 1,
2652 FRF_AZ_SRM_NB_SZ, 0);
2653 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2655 /* Wait for SRAM reset to complete */
2656 count = 0;
2657 do {
2658 EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
2660 /* SRAM reset is slow; expect around 16ms */
2661 schedule_timeout_uninterruptible(HZ / 50);
2663 /* Check for reset complete */
2664 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2665 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2666 EFX_LOG(efx, "SRAM reset complete\n");
2668 return 0;
2669 }
2670 } while (++count < 20); /* wait up to 0.4 sec */
2672 EFX_ERR(efx, "timed out waiting for SRAM reset\n");
2673 return -ETIMEDOUT;
2676 static int falcon_spi_device_init(struct efx_nic *efx,
2677 struct efx_spi_device **spi_device_ret,
2678 unsigned int device_id, u32 device_type)
2680 struct efx_spi_device *spi_device;
2682 if (device_type != 0) {
2683 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
2686 spi_device->device_id = device_id;
2687 spi_device->size =
2688 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2689 spi_device->addr_len =
2690 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2691 spi_device->munge_address = (spi_device->size == 1 << 9 &&
2692 spi_device->addr_len == 1);
2693 spi_device->erase_command =
2694 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
2695 spi_device->erase_size =
2696 1 << SPI_DEV_TYPE_FIELD(device_type,
2697 SPI_DEV_TYPE_ERASE_SIZE);
2698 spi_device->block_size =
2699 1 << SPI_DEV_TYPE_FIELD(device_type,
2700 SPI_DEV_TYPE_BLOCK_SIZE);
2702 spi_device->efx = efx;
2703 } else {
2704 spi_device = NULL;
2705 }
2707 kfree(*spi_device_ret);
2708 *spi_device_ret = spi_device;
2709 return 0;
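/* Editorial worked example (hypothetical values): a device_type word
 * carrying SIZE=16, ADDR_LEN=3, ERASE_CMD=0x20, ERASE_SIZE=12 and
 * BLOCK_SIZE=8 would decode above to a 64 KB part addressed with three
 * bytes, erased 4 KB at a time with opcode 0x20 and written in 256-byte
 * blocks. */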
2713 static void falcon_remove_spi_devices(struct efx_nic *efx)
2715 kfree(efx->spi_eeprom);
2716 efx->spi_eeprom = NULL;
2717 kfree(efx->spi_flash);
2718 efx->spi_flash = NULL;
2721 /* Extract non-volatile configuration */
2722 static int falcon_probe_nvconfig(struct efx_nic *efx)
2724 struct falcon_nvconfig *nvconfig;
2725 int board_rev;
2726 int rc;
2728 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
2729 if (!nvconfig)
2730 return -ENOMEM;
2732 rc = falcon_read_nvram(efx, nvconfig);
2733 if (rc == -EINVAL) {
2734 EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
2735 efx->phy_type = PHY_TYPE_NONE;
2736 efx->mdio.prtad = MDIO_PRTAD_NONE;
2737 board_rev = 0;
2738 rc = 0;
2739 } else if (rc) {
2740 goto fail1;
2741 } else {
2742 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
2743 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
2745 efx->phy_type = v2->port0_phy_type;
2746 efx->mdio.prtad = v2->port0_phy_addr;
2747 board_rev = le16_to_cpu(v2->board_revision);
2749 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2750 rc = falcon_spi_device_init(
2751 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
2752 le32_to_cpu(v3->spi_device_type
2753 [FFE_AB_SPI_DEVICE_FLASH]));
2754 if (rc)
2755 goto fail2;
2756 rc = falcon_spi_device_init(
2757 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
2758 le32_to_cpu(v3->spi_device_type
2759 [FFE_AB_SPI_DEVICE_EEPROM]));
2760 if (rc)
2761 goto fail2;
2762 }
2765 /* Read the MAC addresses */
2766 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2768 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
2770 falcon_probe_board(efx, board_rev);
2772 kfree(nvconfig);
2773 return 0;
2775 fail2:
2776 falcon_remove_spi_devices(efx);
2777 fail1:
2778 kfree(nvconfig);
2779 return rc;
2782 /* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
2783 * count, port speed). Set workaround and feature flags accordingly.
2785 static int falcon_probe_nic_variant(struct efx_nic *efx)
2787 efx_oword_t altera_build;
2788 efx_oword_t nic_stat;
2790 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
2791 if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
2792 EFX_ERR(efx, "Falcon FPGA not supported\n");
2793 return -ENODEV;
2796 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2798 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2799 u8 pci_rev = efx->pci_dev->revision;
2801 if ((pci_rev == 0xff) || (pci_rev == 0)) {
2802 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2803 return -ENODEV;
2805 if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
2806 EFX_ERR(efx, "Falcon rev A1 1G not supported\n");
2807 return -ENODEV;
2809 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
2810 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2811 return -ENODEV;
2818 /* Probe all SPI devices on the NIC */
2819 static void falcon_probe_spi_devices(struct efx_nic *efx)
2821 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2822 int boot_dev;
2824 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
2825 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2826 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2828 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
2829 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
2830 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
2831 EFX_LOG(efx, "Booted from %s\n",
2832 boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
2833 } else {
2834 /* Disable VPD and set clock dividers to safe
2835 * values for initial programming. */
2836 boot_dev = -1;
2837 EFX_LOG(efx, "Booted from internal ASIC settings;"
2838 " setting SPI config\n");
2839 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2840 /* 125 MHz / 7 ~= 18 MHz */
2841 FRF_AB_EE_SF_CLOCK_DIV, 7,
2842 /* 125 MHz / 63 ~= 2 MHz */
2843 FRF_AB_EE_EE_CLOCK_DIV, 63);
2844 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2847 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
2848 falcon_spi_device_init(efx, &efx->spi_flash,
2849 FFE_AB_SPI_DEVICE_FLASH,
2850 default_flash_type);
2851 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
2852 falcon_spi_device_init(efx, &efx->spi_eeprom,
2853 FFE_AB_SPI_DEVICE_EEPROM,
2854 large_eeprom_type);
2857 int falcon_probe_nic(struct efx_nic *efx)
2859 struct falcon_nic_data *nic_data;
2860 struct falcon_board *board;
2861 int rc;
2863 /* Allocate storage for hardware specific data */
2864 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2865 if (!nic_data)
2866 return -ENOMEM;
2867 efx->nic_data = nic_data;
2869 /* Determine number of ports etc. */
2870 rc = falcon_probe_nic_variant(efx);
2871 if (rc)
2872 goto fail1;
2874 /* Probe secondary function if expected */
2875 if (FALCON_IS_DUAL_FUNC(efx)) {
2876 struct pci_dev *dev = pci_dev_get(efx->pci_dev);
2878 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
2879 dev))) {
2880 if (dev->bus == efx->pci_dev->bus &&
2881 dev->devfn == efx->pci_dev->devfn + 1) {
2882 nic_data->pci_dev2 = dev;
2883 break;
2884 }
2885 }
2886 if (!nic_data->pci_dev2) {
2887 EFX_ERR(efx, "failed to find secondary function\n");
2888 rc = -ENODEV;
2889 goto fail2;
2893 /* Now we can reset the NIC */
2894 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
2895 if (rc) {
2896 EFX_ERR(efx, "failed to reset NIC\n");
2897 goto fail3;
2898 }
2900 /* Allocate memory for INT_KER */
2901 rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
2902 if (rc)
2903 goto fail4;
2904 BUG_ON(efx->irq_status.dma_addr & 0x0f);
2906 EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
2907 (u64)efx->irq_status.dma_addr,
2908 efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
2910 falcon_probe_spi_devices(efx);
2912 /* Read in the non-volatile configuration */
2913 rc = falcon_probe_nvconfig(efx);
2914 if (rc)
2915 goto fail5;
2917 /* Initialise I2C adapter */
2918 board = falcon_board(efx);
2919 board->i2c_adap.owner = THIS_MODULE;
2920 board->i2c_data = falcon_i2c_bit_operations;
2921 board->i2c_data.data = efx;
2922 board->i2c_adap.algo_data = &board->i2c_data;
2923 board->i2c_adap.dev.parent = &efx->pci_dev->dev;
2924 strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
2925 sizeof(board->i2c_adap.name));
2926 rc = i2c_bit_add_bus(&board->i2c_adap);
2927 if (rc)
2928 goto fail5;
2930 rc = falcon_board(efx)->type->init(efx);
2931 if (rc) {
2932 EFX_ERR(efx, "failed to initialise board\n");
2933 goto fail6;
2934 }
2936 nic_data->stats_disable_count = 1;
2937 setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
2938 (unsigned long)efx);
2940 return 0;
2942 fail6:
2943 BUG_ON(i2c_del_adapter(&board->i2c_adap));
2944 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
2945 fail5:
2946 falcon_remove_spi_devices(efx);
2947 falcon_free_buffer(efx, &efx->irq_status);
2948 fail4:
2949 fail3:
2950 if (nic_data->pci_dev2) {
2951 pci_dev_put(nic_data->pci_dev2);
2952 nic_data->pci_dev2 = NULL;
2953 }
2954 fail2:
2955 fail1:
2956 kfree(efx->nic_data);
2957 return rc;
2960 static void falcon_init_rx_cfg(struct efx_nic *efx)
2962 /* Prior to Siena the RX DMA engine will split each frame at
2963 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
2964 * be so large that that never happens. */
2965 const unsigned huge_buf_size = (3 * 4096) >> 5;
2966 /* RX control FIFO thresholds (32 entries) */
2967 const unsigned ctrl_xon_thr = 20;
2968 const unsigned ctrl_xoff_thr = 25;
2969 /* RX data FIFO thresholds (256-byte units; size varies) */
2970 int data_xon_thr = rx_xon_thresh_bytes >> 8;
2971 int data_xoff_thr = rx_xoff_thresh_bytes >> 8;
2972 efx_oword_t reg;
2974 efx_reado(efx, &reg, FR_AZ_RX_CFG);
2975 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2976 /* Data FIFO size is 5.5K */
2977 if (data_xon_thr < 0)
2978 data_xon_thr = 512 >> 8;
2979 if (data_xoff_thr < 0)
2980 data_xoff_thr = 2048 >> 8;
2981 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2982 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
2983 huge_buf_size);
2984 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
2985 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
2986 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2987 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
2989 /* Data FIFO size is 80K; register fields moved */
2990 if (data_xon_thr < 0)
2991 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
2992 if (data_xoff_thr < 0)
2993 data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
2994 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
2995 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
2996 huge_buf_size);
2997 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
2998 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
2999 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
3000 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
3001 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
3003 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
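/* Editorial note on the B0 defaults above: 27648/256 = 108 and
 * 54272/256 = 212 FIFO units, i.e. XON once the FIFO drains to about
 * three 9216-byte jumbo frames and XOFF once it fills to 80 KB minus
 * those same three frames (81920 - 27648 = 54272 bytes). */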
3006 /* This call performs hardware-specific global initialisation, such as
3007 * defining the descriptor cache sizes and number of RSS channels.
3008 * It does not set up any buffers, descriptor rings or event queues.
3010 int falcon_init_nic(struct efx_nic *efx)
3012 efx_oword_t temp;
3013 int rc;
3015 /* Use on-chip SRAM */
3016 efx_reado(efx, &temp, FR_AB_NIC_STAT);
3017 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
3018 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
3020 /* Set the source of the GMAC clock */
3021 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
3022 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
3023 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
3024 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
3027 /* Select the correct MAC */
3028 falcon_clock_mac(efx);
3030 rc = falcon_reset_sram(efx);
3031 if (rc)
3032 return rc;
3034 /* Set positions of descriptor caches in SRAM. */
3035 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
3036 efx->type->tx_dc_base / 8);
3037 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
3038 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
3039 efx->type->rx_dc_base / 8);
3040 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
3042 /* Set TX descriptor cache size. */
3043 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
3044 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
3045 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
3047 /* Set RX descriptor cache size. Set low watermark to size-8, as
3048 * this allows most efficient prefetching.
3050 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
3051 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
3052 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
3053 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
3054 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
3056 /* Program INT_KER address */
3057 EFX_POPULATE_OWORD_2(temp,
3058 FRF_AZ_NORM_INT_VEC_DIS_KER,
3059 EFX_INT_MODE_USE_MSI(efx),
3060 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
3061 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
3063 /* Clear the parity enables on the TX data fifos as
3064 * they produce false parity errors because of timing issues
3066 if (EFX_WORKAROUND_5129(efx)) {
3067 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
3068 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
3069 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
3072 /* Enable all the genuinely fatal interrupts. (They are still
3073 * masked by the overall interrupt mask, controlled by
3074 * falcon_interrupts()).
3076 * Note: All other fatal interrupts are enabled
3078 EFX_POPULATE_OWORD_3(temp,
3079 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
3080 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
3081 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
3082 EFX_INVERT_OWORD(temp);
3083 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
3085 if (EFX_WORKAROUND_7244(efx)) {
3086 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
3087 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
3088 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
3089 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
3090 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
3091 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
3094 falcon_setup_rss_indir_table(efx);
3096 /* XXX This is documented only for Falcon A0/A1 */
3097 /* Set up RX. The "wait for descriptor" mode is broken and must
3098 * be disabled. RXDP recovery shouldn't be needed, but is.
3100 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
3101 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
3102 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
3103 if (EFX_WORKAROUND_5583(efx))
3104 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
3105 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
3107 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3108 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3110 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
3111 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
3112 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
3113 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
3114 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
3115 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
3116 /* Enable SW_EV to inherit in char driver - assume harmless here */
3117 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
3118 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
3119 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
3120 /* Squash TX of packets of 16 bytes or less */
3121 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
3122 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
3123 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
3125 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
3126 * descriptors (which is bad).
3128 efx_reado(efx, &temp, FR_AZ_TX_CFG);
3129 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
3130 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
3132 falcon_init_rx_cfg(efx);
3134 /* Set destination of both TX and RX Flush events */
3135 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
3136 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
3137 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
3138 }
3140 return 0;
3143 void falcon_remove_nic(struct efx_nic *efx)
3145 struct falcon_nic_data *nic_data = efx->nic_data;
3146 struct falcon_board *board = falcon_board(efx);
3147 int rc;
3149 board->type->fini(efx);
3151 /* Remove I2C adapter and clear it in preparation for a retry */
3152 rc = i2c_del_adapter(&board->i2c_adap);
3153 BUG_ON(rc);
3154 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
3156 falcon_remove_spi_devices(efx);
3157 falcon_free_buffer(efx, &efx->irq_status);
3159 falcon_reset_hw(efx, RESET_TYPE_ALL);
3161 /* Release the second function after the reset */
3162 if (nic_data->pci_dev2) {
3163 pci_dev_put(nic_data->pci_dev2);
3164 nic_data->pci_dev2 = NULL;
3167 /* Tear down the private nic state */
3168 kfree(efx->nic_data);
3169 efx->nic_data = NULL;
3172 void falcon_update_nic_stats(struct efx_nic *efx)
3174 struct falcon_nic_data *nic_data = efx->nic_data;
3175 efx_oword_t cnt;
3177 if (nic_data->stats_disable_count)
3178 return;
3180 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
3181 efx->n_rx_nodesc_drop_cnt +=
3182 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
3184 if (nic_data->stats_pending &&
3185 *nic_data->stats_dma_done == FALCON_STATS_DONE) {
3186 nic_data->stats_pending = false;
3187 rmb(); /* read the done flag before the stats */
3188 efx->mac_op->update_stats(efx);
3192 void falcon_start_nic_stats(struct efx_nic *efx)
3194 struct falcon_nic_data *nic_data = efx->nic_data;
3196 spin_lock_bh(&efx->stats_lock);
3197 if (--nic_data->stats_disable_count == 0)
3198 falcon_stats_request(efx);
3199 spin_unlock_bh(&efx->stats_lock);
3202 void falcon_stop_nic_stats(struct efx_nic *efx)
3204 struct falcon_nic_data *nic_data = efx->nic_data;
3205 int i;
3207 might_sleep();
3209 spin_lock_bh(&efx->stats_lock);
3210 ++nic_data->stats_disable_count;
3211 spin_unlock_bh(&efx->stats_lock);
3213 del_timer_sync(&nic_data->stats_timer);
3215 /* Wait enough time for the most recent transfer to
3216 * complete. */
3217 for (i = 0; i < 4 && nic_data->stats_pending; i++) {
3218 if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
3219 break;
3220 msleep(1);
3221 }
3223 spin_lock_bh(&efx->stats_lock);
3224 falcon_stats_complete(efx);
3225 spin_unlock_bh(&efx->stats_lock);
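/* Editorial usage sketch: the disable count nests, so callers bracket
 * MAC/PHY reconfiguration with a stop/start pair and stats DMA resumes
 * only when the outermost caller restarts it:
 *
 *	falcon_stop_nic_stats(efx);
 *	// ... reconfigure the MAC (hypothetical caller code) ...
 *	falcon_start_nic_stats(efx);
 */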
3228 /**************************************************************************
3230 * Revision-dependent attributes used by efx.c
3232 **************************************************************************
3235 struct efx_nic_type falcon_a1_nic_type = {
3236 .default_mac_ops = &falcon_xmac_operations,
3238 .revision = EFX_REV_FALCON_A1,
3239 .mem_map_size = 0x20000,
3240 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
3241 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
3242 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
3243 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
3244 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
3245 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3246 .rx_buffer_padding = 0x24,
3247 .max_interrupt_mode = EFX_INT_MODE_MSI,
3248 .phys_addr_channels = 4,
3249 .tx_dc_base = 0x130000,
3250 .rx_dc_base = 0x100000,
3251 };
3253 struct efx_nic_type falcon_b0_nic_type = {
3254 .default_mac_ops = &falcon_xmac_operations,
3256 .revision = EFX_REV_FALCON_B0,
3257 /* Map everything up to and including the RSS indirection
3258 * table. Don't map MSI-X table, MSI-X PBA since Linux
3259 * requires that they not be mapped. */
3260 .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
3261 FR_BZ_RX_INDIRECTION_TBL_STEP *
3262 FR_BZ_RX_INDIRECTION_TBL_ROWS),
3263 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
3264 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
3265 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
3266 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
3267 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
3268 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3269 .rx_buffer_padding = 0,
3270 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3271 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
3272 * interrupt handler only supports 32
3274 .tx_dc_base = 0x130000,
3275 .rx_dc_base = 0x100000,
3276 };
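/* Editorial note: assuming the usual B0 register layout (RX indirection
 * table at 0x800400, 128 rows of 16 bytes), the mem_map_size expression
 * above evaluates to 0x800400 + 128 * 0x10 = 0x800c00. */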