/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/mii.h>
#include "net_driver.h"
#include "falcon_hwdefs.h"
#include "falcon_io.h"
#include "workarounds.h"
/* Falcon hardware control.
 * Falcon is the internal codename for the SFC4000 controller that is
 * present in SFE400X evaluation boards
 * struct falcon_nic_data - Falcon NIC state
 * @next_buffer_table: First available buffer table id
 * @pci_dev2: The secondary PCI device if present
 * @i2c_data: Operations and state for I2C bit-bashing algorithm
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
struct falcon_nic_data {
        unsigned next_buffer_table;
        struct pci_dev *pci_dev2;
        struct i2c_algo_bit_data i2c_data;
        unsigned int_error_count;
        unsigned long int_error_expire;
/**************************************************************************
 **************************************************************************
static int disable_dma_stats;
/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 0
#define TX_DC_BASE 0x130000
#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 2
#define RX_DC_BASE 0x100000
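/* A minimal compile-time sanity check of the sizing above.  It assumes
 * the _ORDER values encode the cache size relative to the 16-entry
 * minimum, i.e. ENTRIES = 16 << ORDER, which matches both pairs here
 * (16 << 0 == 16 for TX, 16 << 2 == 64 for RX); that encoding is an
 * inference from these constants, not something this file states.
 */
#if (TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER)) || \
    (RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER))
#error "descriptor cache ENTRIES and ENTRIES_ORDER values disagree"
#endif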
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
                     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
                     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
                      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
                      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
                      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
                      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
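/* How those constants decode, going by the comments above: the SIZE,
 * ERASE_SIZE and BLOCK_SIZE fields are log2-encoded (13 -> 2^13 = 8 KB,
 * 17 -> 2^17 = 128 KB, 15 -> 2^15 = 32 KB erase block, 5 -> 32 B and
 * 8 -> 256 B write blocks), ADDR_LEN is in bytes (2 -> 16-bit,
 * 3 -> 24-bit), and 0x52 is the AT25F1024 sector-erase opcode.
 */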
/* RX FIFO XOFF watermark
 * When the amount of the RX FIFO used increases past this
 * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
 * This also has an effect on RX/TX arbitration
static int rx_xoff_thresh_bytes = -1;
module_param(rx_xoff_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
/* RX FIFO XON watermark
 * When the amount of the RX FIFO used decreases below this
 * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
 * This also has an effect on RX/TX arbitration
static int rx_xon_thresh_bytes = -1;
module_param(rx_xon_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* TX descriptor ring size - min 512 max 4k */
#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
#define FALCON_TXD_RING_SIZE 1024
#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
/* RX descriptor ring size - min 512 max 4k */
#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
#define FALCON_RXD_RING_SIZE 1024
#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
/* Event queue size - max 32k */
#define FALCON_EVQ_ORDER EVQ_SIZE_4K
#define FALCON_EVQ_SIZE 4096
#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
/* If FALCON_MAX_INT_ERRORS internal errors occur within
 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and disable it.
#define FALCON_INT_ERROR_EXPIRE 3600
#define FALCON_MAX_INT_ERRORS 5
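/* Concretely (see falcon_fatal_interrupt() below): the error counter
 * restarts whenever FALCON_INT_ERROR_EXPIRE seconds have passed since
 * the current window opened, and the FALCON_MAX_INT_ERRORS-th error
 * within one window escalates from RESET_TYPE_INT_ERROR to
 * RESET_TYPE_DISABLE.
 */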
/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 * for flush completions.
#define FALCON_FLUSH_INTERVAL 10
#define FALCON_FLUSH_POLL_COUNT 100
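/* With these values the flush logic below waits at most
 * 100 * 10 ms = 1 s in total (see falcon_flush_queues()).
 */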
/**************************************************************************
 **************************************************************************
/* DMA address mask */
#define FALCON_DMA_MASK DMA_BIT_MASK(46)
/* TX DMA length mask (13-bit) */
#define FALCON_TX_DMA_MASK (4096 - 1)
/* Size and alignment of special buffers (4KB) */
#define FALCON_BUF_SIZE 4096
/* Dummy SRAM size code */
#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
/* Be nice if these (or equiv.) were in linux/pci_regs.h, but they're not. */
#define PCI_EXP_DEVCAP_PWR_VAL_LBN	18
#define PCI_EXP_DEVCAP_PWR_SCL_LBN	26
#define PCI_EXP_DEVCTL_PAYLOAD_LBN	5
#define PCI_EXP_LNKSTA_LNK_WID		0x3f0
#define PCI_EXP_LNKSTA_LNK_WID_LBN	4
#define FALCON_IS_DUAL_FUNC(efx)		\
        (falcon_rev(efx) < FALCON_REV_B0)
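/* That is, Falcon rev A silicon evidently presents two PCI functions
 * (hence the pci_dev2 member in struct falcon_nic_data above); rev B0
 * and later present a single function.
 */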
/**************************************************************************
 * Falcon hardware access
 **************************************************************************/
/* Read the current event from the event queue */
static inline efx_qword_t *falcon_event(struct efx_channel *channel,
        return (((efx_qword_t *) (channel->eventq.addr)) + index);
/* See if an event is present
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
static inline int falcon_event_present(efx_qword_t *event)
        return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
                  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
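/* Worked example of why the two-dword check above is safe: if the
 * NIC's DMA write has so far landed only dword 0 of a new event,
 * dword 1 still reads as all ones and the event is reported "not
 * present"; a single 64-bit compare against all ones would wrongly
 * report that half-written event as present.
 */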
/**************************************************************************
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 **************************************************************************
static void falcon_setsda(void *data, int state)
        struct efx_nic *efx = (struct efx_nic *)data;
        falcon_read(efx, &reg, GPIO_CTL_REG_KER);
        EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state);
        falcon_write(efx, &reg, GPIO_CTL_REG_KER);
static void falcon_setscl(void *data, int state)
        struct efx_nic *efx = (struct efx_nic *)data;
        falcon_read(efx, &reg, GPIO_CTL_REG_KER);
        EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state);
        falcon_write(efx, &reg, GPIO_CTL_REG_KER);
static int falcon_getsda(void *data)
        struct efx_nic *efx = (struct efx_nic *)data;
        falcon_read(efx, &reg, GPIO_CTL_REG_KER);
        return EFX_OWORD_FIELD(reg, GPIO3_IN);
static int falcon_getscl(void *data)
        struct efx_nic *efx = (struct efx_nic *)data;
        falcon_read(efx, &reg, GPIO_CTL_REG_KER);
        return EFX_OWORD_FIELD(reg, GPIO0_IN);
static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
        .setsda		= falcon_setsda,
        .setscl		= falcon_setscl,
        .getsda		= falcon_getsda,
        .getscl		= falcon_getscl,
        /* Wait up to 50 ms for slave to let us pull SCL high */
        .timeout	= DIV_ROUND_UP(HZ, 20),
/**************************************************************************
 * Falcon special buffer handling
 * Special buffers are used for event queues and the TX and RX
 *************************************************************************/
 * Initialise a Falcon special buffer
 * This will define a buffer (previously allocated via
 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
falcon_init_special_buffer(struct efx_nic *efx,
                           struct efx_special_buffer *buffer)
        efx_qword_t buf_desc;
        EFX_BUG_ON_PARANOID(!buffer->addr);
        /* Write buffer descriptors to NIC */
        for (i = 0; i < buffer->entries; i++) {
                index = buffer->index + i;
                dma_addr = buffer->dma_addr + (i * 4096);
                EFX_LOG(efx, "mapping special buffer %d at %llx\n",
                        index, (unsigned long long)dma_addr);
                EFX_POPULATE_QWORD_4(buf_desc,
                                     IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
                                     BUF_ADR_FBUF, (dma_addr >> 12),
                                     BUF_OWNER_ID_FBUF, 0);
                falcon_write_sram(efx, &buf_desc, index);
/* Unmaps a buffer from Falcon and clears the buffer table entries */
falcon_fini_special_buffer(struct efx_nic *efx,
                           struct efx_special_buffer *buffer)
        efx_oword_t buf_tbl_upd;
        unsigned int start = buffer->index;
        unsigned int end = (buffer->index + buffer->entries - 1);
        if (!buffer->entries)
        EFX_LOG(efx, "unmapping special buffers %d-%d\n",
                buffer->index, buffer->index + buffer->entries - 1);
        EFX_POPULATE_OWORD_4(buf_tbl_upd,
                             BUF_CLR_START_ID, start);
        falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
 * Allocate a new Falcon special buffer
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into Falcon's buffer table.
 * This call will allocate 4KB buffers, since Falcon can't use 8KB
 * buffers for event queues and descriptor rings.
static int falcon_alloc_special_buffer(struct efx_nic *efx,
                                       struct efx_special_buffer *buffer,
        struct falcon_nic_data *nic_data = efx->nic_data;
        len = ALIGN(len, FALCON_BUF_SIZE);
        buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
        buffer->entries = len / FALCON_BUF_SIZE;
        BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));
        /* All zeros is a potentially valid event so memset to 0xff */
        memset(buffer->addr, 0xff, len);
        /* Select new buffer ID */
        buffer->index = nic_data->next_buffer_table;
        nic_data->next_buffer_table += buffer->entries;
        EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
                "(virt %p phys %lx)\n", buffer->index,
                buffer->index + buffer->entries - 1,
                (unsigned long long)buffer->dma_addr, len,
                buffer->addr, virt_to_phys(buffer->addr));
static void falcon_free_special_buffer(struct efx_nic *efx,
                                       struct efx_special_buffer *buffer)
        EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
                "(virt %p phys %lx)\n", buffer->index,
                buffer->index + buffer->entries - 1,
                (unsigned long long)buffer->dma_addr, buffer->len,
                buffer->addr, virt_to_phys(buffer->addr));
        pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
/**************************************************************************
 * Falcon generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 **************************************************************************/
static int falcon_alloc_buffer(struct efx_nic *efx,
                               struct efx_buffer *buffer, unsigned int len)
        buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
        memset(buffer->addr, 0, len);
static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
        pci_free_consistent(efx->pci_dev, buffer->len,
                            buffer->addr, buffer->dma_addr);
/**************************************************************************
 **************************************************************************/
/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
        return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
        write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
        EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
        falcon_writel_page(tx_queue->efx, &reg,
                           TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
void falcon_push_buffers(struct efx_tx_queue *tx_queue)
        struct efx_tx_buffer *buffer;
        BUG_ON(tx_queue->write_count == tx_queue->insert_count);
                write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
                buffer = &tx_queue->buffer[write_ptr];
                txd = falcon_tx_desc(tx_queue, write_ptr);
                ++tx_queue->write_count;
                /* Create TX descriptor ring entry */
                EFX_POPULATE_QWORD_5(*txd,
                                     TX_KER_CONT, buffer->continuation,
                                     TX_KER_BYTE_CNT, buffer->len,
                                     TX_KER_BUF_REGION, 0,
                                     TX_KER_BUF_ADR, buffer->dma_addr);
        } while (tx_queue->write_count != tx_queue->insert_count);
        wmb(); /* Ensure descriptors are written before they are fetched */
        falcon_notify_tx_desc(tx_queue);
/* Allocate hardware resources for a TX queue */
int falcon_probe_tx(struct efx_tx_queue *tx_queue)
        struct efx_nic *efx = tx_queue->efx;
        return falcon_alloc_special_buffer(efx, &tx_queue->txd,
                                           FALCON_TXD_RING_SIZE *
                                           sizeof(efx_qword_t));
void falcon_init_tx(struct efx_tx_queue *tx_queue)
        efx_oword_t tx_desc_ptr;
        struct efx_nic *efx = tx_queue->efx;
        tx_queue->flushed = false;
        /* Pin TX descriptor ring */
        falcon_init_special_buffer(efx, &tx_queue->txd);
        /* Push TX descriptor ring to card */
        EFX_POPULATE_OWORD_10(tx_desc_ptr,
                              TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
                              TX_DESCQ_EVQ_ID, tx_queue->channel->channel,
                              TX_DESCQ_OWNER_ID, 0,
                              TX_DESCQ_LABEL, tx_queue->queue,
                              TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
                              TX_NON_IP_DROP_DIS_B0, 1);
        if (falcon_rev(efx) >= FALCON_REV_B0) {
                int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
                EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
                EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
        falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
        if (falcon_rev(efx) < FALCON_REV_B0) {
                /* Only 128 bits in this register */
                BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
                falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
                if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
                        clear_bit_le(tx_queue->queue, (void *)&reg);
                        set_bit_le(tx_queue->queue, (void *)&reg);
                falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
        struct efx_nic *efx = tx_queue->efx;
        efx_oword_t tx_flush_descq;
        /* Post a flush command */
        EFX_POPULATE_OWORD_2(tx_flush_descq,
                             TX_FLUSH_DESCQ_CMD, 1,
                             TX_FLUSH_DESCQ, tx_queue->queue);
        falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
void falcon_fini_tx(struct efx_tx_queue *tx_queue)
        struct efx_nic *efx = tx_queue->efx;
        efx_oword_t tx_desc_ptr;
        /* The queue should have been flushed */
        WARN_ON(!tx_queue->flushed);
        /* Remove TX descriptor ring from card */
        EFX_ZERO_OWORD(tx_desc_ptr);
        falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
        /* Unpin TX descriptor ring */
        falcon_fini_special_buffer(efx, &tx_queue->txd);
/* Free buffers backing TX queue */
void falcon_remove_tx(struct efx_tx_queue *tx_queue)
        falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
/**************************************************************************
 **************************************************************************/
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
        return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
/* This creates an entry in the RX descriptor queue */
static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
        struct efx_rx_buffer *rx_buf;
        rxd = falcon_rx_desc(rx_queue, index);
        rx_buf = efx_rx_buffer(rx_queue, index);
        EFX_POPULATE_QWORD_3(*rxd,
                             rx_queue->efx->type->rx_buffer_padding,
                             RX_KER_BUF_REGION, 0,
                             RX_KER_BUF_ADR, rx_buf->dma_addr);
/* This writes to the RX_DESC_WPTR register for the specified receive
void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
        while (rx_queue->notified_count != rx_queue->added_count) {
                falcon_build_rx_desc(rx_queue,
                                     rx_queue->notified_count &
                                     FALCON_RXD_RING_MASK);
                ++rx_queue->notified_count;
        write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
        EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
        falcon_writel_page(rx_queue->efx, &reg,
                           RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
int falcon_probe_rx(struct efx_rx_queue *rx_queue)
        struct efx_nic *efx = rx_queue->efx;
        return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
                                           FALCON_RXD_RING_SIZE *
                                           sizeof(efx_qword_t));
void falcon_init_rx(struct efx_rx_queue *rx_queue)
        efx_oword_t rx_desc_ptr;
        struct efx_nic *efx = rx_queue->efx;
        bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
        bool iscsi_digest_en = is_b0;
        EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
                rx_queue->queue, rx_queue->rxd.index,
                rx_queue->rxd.index + rx_queue->rxd.entries - 1);
        rx_queue->flushed = false;
        /* Pin RX descriptor ring */
        falcon_init_special_buffer(efx, &rx_queue->rxd);
        /* Push RX descriptor ring to card */
        EFX_POPULATE_OWORD_10(rx_desc_ptr,
                              RX_ISCSI_DDIG_EN, iscsi_digest_en,
                              RX_ISCSI_HDIG_EN, iscsi_digest_en,
                              RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
                              RX_DESCQ_EVQ_ID, rx_queue->channel->channel,
                              RX_DESCQ_OWNER_ID, 0,
                              RX_DESCQ_LABEL, rx_queue->queue,
                              RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
                              RX_DESCQ_TYPE, 0 /* kernel queue */ ,
                              /* For >=B0 this is scatter so disable */
                              RX_DESCQ_JUMBO, !is_b0,
        falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
        struct efx_nic *efx = rx_queue->efx;
        efx_oword_t rx_flush_descq;
        /* Post a flush command */
        EFX_POPULATE_OWORD_2(rx_flush_descq,
                             RX_FLUSH_DESCQ_CMD, 1,
                             RX_FLUSH_DESCQ, rx_queue->queue);
        falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
void falcon_fini_rx(struct efx_rx_queue *rx_queue)
        efx_oword_t rx_desc_ptr;
        struct efx_nic *efx = rx_queue->efx;
        /* The queue should already have been flushed */
        WARN_ON(!rx_queue->flushed);
        /* Remove RX descriptor ring from card */
        EFX_ZERO_OWORD(rx_desc_ptr);
        falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
        /* Unpin RX descriptor ring */
        falcon_fini_special_buffer(efx, &rx_queue->rxd);
/* Free buffers backing RX queue */
void falcon_remove_rx(struct efx_rx_queue *rx_queue)
        falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
/**************************************************************************
 * Falcon event queue processing
 * Event queues are processed by per-channel tasklets.
 **************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
 * This writes the EVQ_RPTR_REG register for the specified channel's event queue.
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to read" event.
void falcon_eventq_read_ack(struct efx_channel *channel)
        struct efx_nic *efx = channel->efx;
        EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
        falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
/* Use HW to insert a SW defined event */
void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
        efx_oword_t drv_ev_reg;
        EFX_POPULATE_OWORD_2(drv_ev_reg,
                             DRV_EV_QID, channel->channel,
                             EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
        falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
/* Handle a transmit completion event
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
static void falcon_handle_tx_event(struct efx_channel *channel,
        unsigned int tx_ev_desc_ptr;
        unsigned int tx_ev_q_label;
        struct efx_tx_queue *tx_queue;
        struct efx_nic *efx = channel->efx;
        if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
                /* Transmit completion */
                tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
                tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
                tx_queue = &efx->tx_queue[tx_ev_q_label];
                efx_xmit_done(tx_queue, tx_ev_desc_ptr);
        } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
                /* Rewrite the FIFO write pointer */
                tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
                tx_queue = &efx->tx_queue[tx_ev_q_label];
                if (efx_dev_registered(efx))
                        netif_tx_lock(efx->net_dev);
                falcon_notify_tx_desc(tx_queue);
                if (efx_dev_registered(efx))
                        netif_tx_unlock(efx->net_dev);
        } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
                   EFX_WORKAROUND_10727(efx)) {
                efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
                EFX_ERR(efx, "channel %d unexpected TX event "
                        EFX_QWORD_FMT"\n", channel->channel,
                        EFX_QWORD_VAL(*event));
/* Detect errors included in the rx_ev_pkt_ok bit. */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
                                    const efx_qword_t *event,
        struct efx_nic *efx = rx_queue->efx;
        bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
        bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
        bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
        bool rx_ev_other_err, rx_ev_pause_frm;
        bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
        unsigned rx_ev_pkt_type;
        rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
        rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
        rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
        rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
        rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
                                                 RX_EV_BUF_OWNER_ID_ERR);
        rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
        rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
                                                  RX_EV_IP_HDR_CHKSUM_ERR);
        rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
                                                   RX_EV_TCP_UDP_CHKSUM_ERR);
        rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
        rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
        rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
                          0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
        rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
        /* Every error apart from tobe_disc and pause_frm */
        rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
                           rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
                           rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
        /* Count errors that are not in MAC stats.  Ignore expected
         * checksum errors during self-test. */
                ++rx_queue->channel->n_rx_frm_trunc;
        else if (rx_ev_tobe_disc)
                ++rx_queue->channel->n_rx_tobe_disc;
        else if (!efx->loopback_selftest) {
                if (rx_ev_ip_hdr_chksum_err)
                        ++rx_queue->channel->n_rx_ip_hdr_chksum_err;
                else if (rx_ev_tcp_udp_chksum_err)
                        ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
        if (rx_ev_ip_frag_err)
                ++rx_queue->channel->n_rx_ip_frag_err;
        /* The frame must be discarded if any of these are true. */
        *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
                    rx_ev_tobe_disc | rx_ev_pause_frm);
        /* TOBE_DISC is expected on unicast mismatches; don't print out an
         * error message.  FRM_TRUNC indicates RXDP dropped the packet due
         * to a FIFO overflow.
#ifdef EFX_ENABLE_DEBUG
        if (rx_ev_other_err) {
                EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
                            EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
                            rx_queue->queue, EFX_QWORD_VAL(*event),
                            rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
                            rx_ev_ip_hdr_chksum_err ?
                            " [IP_HDR_CHKSUM_ERR]" : "",
                            rx_ev_tcp_udp_chksum_err ?
                            " [TCP_UDP_CHKSUM_ERR]" : "",
                            rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
                            rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
                            rx_ev_drib_nib ? " [DRIB_NIB]" : "",
                            rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
                            rx_ev_pause_frm ? " [PAUSE]" : "");
/* Handle receive events that are not in-order. */
static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
        struct efx_nic *efx = rx_queue->efx;
        unsigned expected, dropped;
        expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
        dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
                   FALCON_RXD_RING_MASK);
        EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
                 dropped, index, expected);
        efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
                           RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
/* Handle a packet received event
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
static void falcon_handle_rx_event(struct efx_channel *channel,
                                   const efx_qword_t *event)
        unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
        unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
        unsigned expected_ptr;
        bool rx_ev_pkt_ok, discard = false, checksummed;
        struct efx_rx_queue *rx_queue;
        struct efx_nic *efx = channel->efx;
        /* Basic packet information */
        rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
        rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
        rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
        WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
        WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
        WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);
        rx_queue = &efx->rx_queue[channel->channel];
        rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
        expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
        if (unlikely(rx_ev_desc_ptr != expected_ptr))
                falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
        if (likely(rx_ev_pkt_ok)) {
                /* If packet is marked as OK and packet type is TCP/IPv4 or
                 * UDP/IPv4, then we can rely on the hardware checksum.
                checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
                falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
        /* Detect multicast packets that didn't match the filter */
        rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
        if (rx_ev_mcast_pkt) {
                unsigned int rx_ev_mcast_hash_match =
                        EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
                if (unlikely(!rx_ev_mcast_hash_match))
        /* Handle received packet */
        efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
                      checksummed, discard);
/* Global events are basically PHY events */
static void falcon_handle_global_event(struct efx_channel *channel,
        struct efx_nic *efx = channel->efx;
        bool handled = false;
        if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
            EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
            EFX_QWORD_FIELD(*event, XG_PHY_INTR) ||
            EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) {
                efx->phy_op->clear_interrupt(efx);
                queue_work(efx->workqueue, &efx->phy_work);
        if ((falcon_rev(efx) >= FALCON_REV_B0) &&
            EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) {
                queue_work(efx->workqueue, &efx->mac_work);
        if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
                EFX_ERR(efx, "channel %d seen global RX_RESET "
                        "event. Resetting.\n", channel->channel);
                atomic_inc(&efx->rx_reset);
                efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
                                   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
        EFX_ERR(efx, "channel %d unknown global event "
                EFX_QWORD_FMT "\n", channel->channel,
                EFX_QWORD_VAL(*event));
static void falcon_handle_driver_event(struct efx_channel *channel,
        struct efx_nic *efx = channel->efx;
        unsigned int ev_sub_code;
        unsigned int ev_sub_data;
        ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
        ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);
        switch (ev_sub_code) {
        case TX_DESCQ_FLS_DONE_EV_DECODE:
                EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
                          channel->channel, ev_sub_data);
        case RX_DESCQ_FLS_DONE_EV_DECODE:
                EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
                          channel->channel, ev_sub_data);
        case EVQ_INIT_DONE_EV_DECODE:
                EFX_LOG(efx, "channel %d EVQ %d initialised\n",
                        channel->channel, ev_sub_data);
        case SRM_UPD_DONE_EV_DECODE:
                EFX_TRACE(efx, "channel %d SRAM update done\n",
        case WAKE_UP_EV_DECODE:
                EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
                          channel->channel, ev_sub_data);
        case TIMER_EV_DECODE:
                EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
                          channel->channel, ev_sub_data);
        case RX_RECOVERY_EV_DECODE:
                EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
                        "Resetting.\n", channel->channel);
                atomic_inc(&efx->rx_reset);
                efx_schedule_reset(efx,
                                   EFX_WORKAROUND_6555(efx) ?
                                   RESET_TYPE_RX_RECOVERY :
        case RX_DSC_ERROR_EV_DECODE:
                EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
                        " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
                efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
        case TX_DSC_ERROR_EV_DECODE:
                EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
                        " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
                efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
                EFX_TRACE(efx, "channel %d unknown driver event code %d "
                          "data %04x\n", channel->channel, ev_sub_code,
int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
        unsigned int read_ptr;
        efx_qword_t event, *p_event;
        read_ptr = channel->eventq_read_ptr;
                p_event = falcon_event(channel, read_ptr);
                if (!falcon_event_present(&event))
                EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
                          channel->channel, EFX_QWORD_VAL(event));
                /* Clear this event by marking it all ones */
                EFX_SET_QWORD(*p_event);
                ev_code = EFX_QWORD_FIELD(event, EV_CODE);
                case RX_IP_EV_DECODE:
                        falcon_handle_rx_event(channel, &event);
                case TX_IP_EV_DECODE:
                        falcon_handle_tx_event(channel, &event);
                case DRV_GEN_EV_DECODE:
                        channel->eventq_magic
                                = EFX_QWORD_FIELD(event, EVQ_MAGIC);
                        EFX_LOG(channel->efx, "channel %d received generated "
                                "event "EFX_QWORD_FMT"\n", channel->channel,
                                EFX_QWORD_VAL(event));
                case GLOBAL_EV_DECODE:
                        falcon_handle_global_event(channel, &event);
                case DRIVER_EV_DECODE:
                        falcon_handle_driver_event(channel, &event);
                        EFX_ERR(channel->efx, "channel %d unknown event type %d"
                                " (data " EFX_QWORD_FMT ")\n", channel->channel,
                                ev_code, EFX_QWORD_VAL(event));
                /* Increment read pointer */
                read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
        } while (rx_packets < rx_quota);
        channel->eventq_read_ptr = read_ptr;
void falcon_set_int_moderation(struct efx_channel *channel)
        efx_dword_t timer_cmd;
        struct efx_nic *efx = channel->efx;
        /* Set timer register */
        if (channel->irq_moderation) {
                /* Round to resolution supported by hardware.  The value we
                 * program is based at 0.  So actual interrupt moderation
                 * achieved is ((x + 1) * res).
                unsigned int res = 5;
                channel->irq_moderation -= (channel->irq_moderation % res);
                if (channel->irq_moderation < res)
                        channel->irq_moderation = res;
                EFX_POPULATE_DWORD_2(timer_cmd,
                                     TIMER_MODE, TIMER_MODE_INT_HLDOFF,
                                     (channel->irq_moderation / res) - 1);
                EFX_POPULATE_DWORD_2(timer_cmd,
                                     TIMER_MODE, TIMER_MODE_DIS,
        falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
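/* Worked example of the rounding above: a requested irq_moderation of
 * 13 is first rounded down to 10, the hardware field is written as
 * 10/5 - 1 = 1, and the achieved hold-off is (1 + 1) * 5 = 10, i.e.
 * exactly the value left in channel->irq_moderation.
 */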
/* Allocate buffer table entries for event queue */
int falcon_probe_eventq(struct efx_channel *channel)
        struct efx_nic *efx = channel->efx;
        unsigned int evq_size;
        evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
        return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
void falcon_init_eventq(struct efx_channel *channel)
        efx_oword_t evq_ptr;
        struct efx_nic *efx = channel->efx;
        EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
                channel->channel, channel->eventq.index,
                channel->eventq.index + channel->eventq.entries - 1);
        /* Pin event queue buffer */
        falcon_init_special_buffer(efx, &channel->eventq);
        /* Fill event queue with all ones (i.e. empty events) */
        memset(channel->eventq.addr, 0xff, channel->eventq.len);
        /* Push event queue to card */
        EFX_POPULATE_OWORD_3(evq_ptr,
                             EVQ_SIZE, FALCON_EVQ_ORDER,
                             EVQ_BUF_BASE_ID, channel->eventq.index);
        falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
        falcon_set_int_moderation(channel);
void falcon_fini_eventq(struct efx_channel *channel)
        efx_oword_t eventq_ptr;
        struct efx_nic *efx = channel->efx;
        /* Remove event queue from card */
        EFX_ZERO_OWORD(eventq_ptr);
        falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
        /* Unpin event queue */
        falcon_fini_special_buffer(efx, &channel->eventq);
/* Free buffers backing event queue */
void falcon_remove_eventq(struct efx_channel *channel)
        falcon_free_special_buffer(channel->efx, &channel->eventq);
/* Generates a test event on the event queue.  A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic.
void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
        efx_qword_t test_event;
        EFX_POPULATE_QWORD_2(test_event,
                             EV_CODE, DRV_GEN_EV_DECODE,
        falcon_generate_event(channel, &test_event);
void falcon_sim_phy_event(struct efx_nic *efx)
        efx_qword_t phy_event;
        EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
        EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1);
        EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1);
        falcon_generate_event(&efx->channel[0], &phy_event);
/**************************************************************************
 **************************************************************************/
static void falcon_poll_flush_events(struct efx_nic *efx)
        struct efx_channel *channel = &efx->channel[0];
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        unsigned int read_ptr = channel->eventq_read_ptr;
        unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK;
                efx_qword_t *event = falcon_event(channel, read_ptr);
                int ev_code, ev_sub_code, ev_queue;
                if (!falcon_event_present(event))
                ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
                ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
                if (ev_code == DRIVER_EV_DECODE &&
                    ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) {
                        ev_queue = EFX_QWORD_FIELD(*event,
                                                   DRIVER_EV_TX_DESCQ_ID);
                        if (ev_queue < EFX_TX_QUEUE_COUNT) {
                                tx_queue = efx->tx_queue + ev_queue;
                                tx_queue->flushed = true;
                } else if (ev_code == DRIVER_EV_DECODE &&
                           ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) {
                        ev_queue = EFX_QWORD_FIELD(*event,
                                                   DRIVER_EV_RX_DESCQ_ID);
                        ev_failed = EFX_QWORD_FIELD(*event,
                                                    DRIVER_EV_RX_FLUSH_FAIL);
                        if (ev_queue < efx->n_rx_queues) {
                                rx_queue = efx->rx_queue + ev_queue;
                                /* retry the rx flush */
                                        falcon_flush_rx_queue(rx_queue);
                                        rx_queue->flushed = true;
                read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
        } while (read_ptr != end_ptr);
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to serialise them
int falcon_flush_queues(struct efx_nic *efx)
        struct efx_rx_queue *rx_queue;
        struct efx_tx_queue *tx_queue;
        /* Issue flush requests */
        efx_for_each_tx_queue(tx_queue, efx) {
                tx_queue->flushed = false;
                falcon_flush_tx_queue(tx_queue);
        efx_for_each_rx_queue(rx_queue, efx) {
                rx_queue->flushed = false;
                falcon_flush_rx_queue(rx_queue);
        /* Poll the evq looking for flush completions. Since we're not pushing
         * any more rx or tx descriptors at this point, we're in no danger of
         * overflowing the evq whilst we wait */
        for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
                msleep(FALCON_FLUSH_INTERVAL);
                falcon_poll_flush_events(efx);
                /* Check if every queue has been successfully flushed */
                outstanding = false;
                efx_for_each_tx_queue(tx_queue, efx)
                        outstanding |= !tx_queue->flushed;
                efx_for_each_rx_queue(rx_queue, efx)
                        outstanding |= !rx_queue->flushed;
        /* Mark the queues as all flushed. We're going to return failure
         * leading to a reset, or fake up success anyway. "flushed" now
         * indicates that we tried to flush. */
        efx_for_each_tx_queue(tx_queue, efx) {
                if (!tx_queue->flushed)
                        EFX_ERR(efx, "tx queue %d flush command timed out\n",
                tx_queue->flushed = true;
        efx_for_each_rx_queue(rx_queue, efx) {
                if (!rx_queue->flushed)
                        EFX_ERR(efx, "rx queue %d flush command timed out\n",
                rx_queue->flushed = true;
        if (EFX_WORKAROUND_7803(efx))
/**************************************************************************
 * Falcon hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 **************************************************************************/
/* Enable/disable/generate Falcon interrupts */
static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
        efx_oword_t int_en_reg_ker;
        EFX_POPULATE_OWORD_2(int_en_reg_ker,
                             DRV_INT_EN_KER, enabled);
        falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
void falcon_enable_interrupts(struct efx_nic *efx)
        efx_oword_t int_adr_reg_ker;
        struct efx_channel *channel;
        EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
        wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
        /* Program address */
        EFX_POPULATE_OWORD_2(int_adr_reg_ker,
                             NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
                             INT_ADR_KER, efx->irq_status.dma_addr);
        falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);
        /* Enable interrupts */
        falcon_interrupts(efx, 1, 0);
        /* Force processing of all the channels to get the EVQ RPTRs up to
        efx_for_each_channel(channel, efx)
                efx_schedule_channel(channel);
void falcon_disable_interrupts(struct efx_nic *efx)
        /* Disable interrupts */
        falcon_interrupts(efx, 0, 0);
/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things may happen.
void falcon_generate_interrupt(struct efx_nic *efx)
        falcon_interrupts(efx, 1, 1);
/* Acknowledge a legacy interrupt from Falcon
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 * NB most hardware supports MSI interrupts
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
        EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
        falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
        falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
        struct falcon_nic_data *nic_data = efx->nic_data;
        efx_oword_t *int_ker = efx->irq_status.addr;
        efx_oword_t fatal_intr;
        int error, mem_perr;
        falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
        error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
        EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
                EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
                EFX_OWORD_VAL(fatal_intr),
                error ? "disabling bus mastering" : "no recognised error");
        /* If this is a memory parity error dump which blocks are offending */
        mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
                falcon_read(efx, &reg, MEM_STAT_REG_KER);
                EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
                        EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
        /* Disable both devices */
        pci_clear_master(efx->pci_dev);
        if (FALCON_IS_DUAL_FUNC(efx))
                pci_clear_master(nic_data->pci_dev2);
        falcon_disable_interrupts(efx);
        /* Count errors and reset or disable the NIC accordingly */
        if (nic_data->int_error_count == 0 ||
            time_after(jiffies, nic_data->int_error_expire)) {
                nic_data->int_error_count = 0;
                nic_data->int_error_expire =
                        jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
        if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) {
                EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
                efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
                EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen. "
                        "NIC will be disabled\n");
                efx_schedule_reset(efx, RESET_TYPE_DISABLE);
/* Handle a legacy interrupt from Falcon
 * Acknowledges the interrupt and schedules event queue processing.
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
        struct efx_nic *efx = dev_id;
        efx_oword_t *int_ker = efx->irq_status.addr;
        struct efx_channel *channel;
        /* Read the ISR which also ACKs the interrupts */
        falcon_readl(efx, &reg, INT_ISR0_B0);
        queues = EFX_EXTRACT_DWORD(reg, 0, 31);
        /* Check to see if we have a serious error condition */
        syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
        if (unlikely(syserr))
                return falcon_fatal_interrupt(efx);
        efx->last_irq_cpu = raw_smp_processor_id();
        EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
                  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
        /* Schedule processing of any interrupting queues */
        channel = &efx->channel[0];
                efx_schedule_channel(channel);
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
        struct efx_nic *efx = dev_id;
        efx_oword_t *int_ker = efx->irq_status.addr;
        struct efx_channel *channel;
        /* Check to see if this is our interrupt.  If it isn't, we
         * exit without having touched the hardware.
        if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
                EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
                          raw_smp_processor_id());
        efx->last_irq_cpu = raw_smp_processor_id();
        EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
                  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
        /* Check to see if we have a serious error condition */
        syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
        if (unlikely(syserr))
                return falcon_fatal_interrupt(efx);
        /* Determine interrupting queues, clear interrupt status
         * register and acknowledge the device interrupt.
        BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
        queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
        EFX_ZERO_OWORD(*int_ker);
        wmb(); /* Ensure the vector is cleared before interrupt ack */
        falcon_irq_ack_a1(efx);
        /* Schedule processing of any interrupting queues */
        channel = &efx->channel[0];
                efx_schedule_channel(channel);
/* Handle an MSI interrupt from Falcon
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
        struct efx_channel *channel = dev_id;
        struct efx_nic *efx = channel->efx;
        efx_oword_t *int_ker = efx->irq_status.addr;
        efx->last_irq_cpu = raw_smp_processor_id();
        EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
                  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
        /* Check to see if we have a serious error condition */
        syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
        if (unlikely(syserr))
                return falcon_fatal_interrupt(efx);
        /* Schedule processing of the channel */
        efx_schedule_channel(channel);
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
static void falcon_setup_rss_indir_table(struct efx_nic *efx)
        unsigned long offset;
        if (falcon_rev(efx) < FALCON_REV_B0)
        for (offset = RX_RSS_INDIR_TBL_B0;
             offset < RX_RSS_INDIR_TBL_B0 + 0x800;
                EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
                                     i % efx->n_rx_queues);
                falcon_writel(efx, &dword, offset);
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
int falcon_init_interrupt(struct efx_nic *efx)
        struct efx_channel *channel;
        if (!EFX_INT_MODE_USE_MSI(efx)) {
                irq_handler_t handler;
                if (falcon_rev(efx) >= FALCON_REV_B0)
                        handler = falcon_legacy_interrupt_b0;
                        handler = falcon_legacy_interrupt_a1;
                rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
                        EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
        /* Hook MSI or MSI-X interrupt */
        efx_for_each_channel(channel, efx) {
                rc = request_irq(channel->irq, falcon_msi_interrupt,
                                 IRQF_PROBE_SHARED, /* Not shared */
                                 channel->name, channel);
                        EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
        efx_for_each_channel(channel, efx)
                free_irq(channel->irq, channel);
void falcon_fini_interrupt(struct efx_nic *efx)
        struct efx_channel *channel;
        /* Disable MSI/MSI-X interrupts */
        efx_for_each_channel(channel, efx) {
                free_irq(channel->irq, channel);
        /* ACK legacy interrupt */
        if (falcon_rev(efx) >= FALCON_REV_B0)
                falcon_read(efx, &reg, INT_ISR0_B0);
                falcon_irq_ack_a1(efx);
        /* Disable legacy interrupt */
        if (efx->legacy_irq)
                free_irq(efx->legacy_irq, efx);
/**************************************************************************
 **************************************************************************
#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
static int falcon_spi_poll(struct efx_nic *efx)
        falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
        return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
        /* Most commands will finish quickly, so we start polling at
         * very short intervals.  Sometimes the command may have to
         * wait for VPD or expansion ROM access outside of our
         * control, so we allow up to 100 ms. */
        unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
        for (i = 0; i < 10; i++) {
                if (!falcon_spi_poll(efx))
                if (!falcon_spi_poll(efx))
                if (time_after_eq(jiffies, timeout)) {
                        EFX_ERR(efx, "timed out waiting for SPI\n");
                schedule_timeout_uninterruptible(1);
int falcon_spi_cmd(const struct efx_spi_device *spi,
                   unsigned int command, int address,
                   const void *in, void *out, size_t len)
        struct efx_nic *efx = spi->efx;
        bool addressed = (address >= 0);
        bool reading = (out != NULL);
        /* Input validation */
        if (len > FALCON_SPI_MAX_LEN)
        BUG_ON(!mutex_is_locked(&efx->spi_lock));
        /* Check that previous command is not still running */
        rc = falcon_spi_poll(efx);
        /* Program address register, if we have an address */
                EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
                falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
        /* Program data register, if we have data */
                memcpy(&reg, in, len);
                falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
        /* Issue read/write command */
        EFX_POPULATE_OWORD_7(reg,
                             EE_SPI_HCMD_CMD_EN, 1,
                             EE_SPI_HCMD_SF_SEL, spi->device_id,
                             EE_SPI_HCMD_DABCNT, len,
                             EE_SPI_HCMD_READ, reading,
                             EE_SPI_HCMD_DUBCNT, 0,
                             (addressed ? spi->addr_len : 0),
                             EE_SPI_HCMD_ENC, command);
        falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
        /* Wait for read/write to complete */
        rc = falcon_spi_wait(efx);
        falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
        memcpy(out, &reg, len);
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
        return min(FALCON_SPI_MAX_LEN,
                   (spi->block_size - (start & (spi->block_size - 1))));
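/* Worked example: with a 256 B write block and start = 0x1fa, the
 * limit is min(16, 256 - 0xfa) = 6 bytes, so a single command never
 * crosses a device write-block boundary.
 */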
efx_spi_munge_command(const struct efx_spi_device *spi,
                      const u8 command, const unsigned int address)
        return command | (((address >> 8) & spi->munge_address) << 3);
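/* Presumably this handles small EEPROMs (AT25040-class) that carry
 * address bit 8 in bit 3 of the opcode: with spi->munge_address == 1,
 * an address of 0x1ab ORs 1 << 3 into the command, while devices with
 * full address bytes use munge_address == 0 and leave the command
 * unchanged.
 */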
/* Wait up to 10 ms for buffered write completion */
int falcon_spi_wait_write(const struct efx_spi_device *spi)
        struct efx_nic *efx = spi->efx;
        unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
                rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
                                    &status, sizeof(status));
                if (!(status & SPI_STATUS_NRDY))
                if (time_after_eq(jiffies, timeout)) {
                        EFX_ERR(efx, "SPI write timeout on device %d"
                                " last status=0x%02x\n",
                                spi->device_id, status);
                schedule_timeout_uninterruptible(1);
int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
                    size_t len, size_t *retlen, u8 *buffer)
        size_t block_len, pos = 0;
        unsigned int command;
                block_len = min(len - pos, FALCON_SPI_MAX_LEN);
                command = efx_spi_munge_command(spi, SPI_READ, start + pos);
                rc = falcon_spi_cmd(spi, command, start + pos, NULL,
                                    buffer + pos, block_len);
                /* Avoid locking up the system */
                if (signal_pending(current)) {
int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
                     size_t len, size_t *retlen, const u8 *buffer)
        u8 verify_buffer[FALCON_SPI_MAX_LEN];
        size_t block_len, pos = 0;
        unsigned int command;
                rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
                block_len = min(len - pos,
                                falcon_spi_write_limit(spi, start + pos));
                command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
                rc = falcon_spi_cmd(spi, command, start + pos,
                                    buffer + pos, NULL, block_len);
                rc = falcon_spi_wait_write(spi);
                command = efx_spi_munge_command(spi, SPI_READ, start + pos);
                rc = falcon_spi_cmd(spi, command, start + pos,
                                    NULL, verify_buffer, block_len);
                if (memcmp(verify_buffer, buffer + pos, block_len)) {
                /* Avoid locking up the system */
                if (signal_pending(current)) {
/**************************************************************************
 **************************************************************************
static int falcon_reset_macs(struct efx_nic *efx)
        if (falcon_rev(efx) < FALCON_REV_B0) {
                /* It's not safe to use GLB_CTL_REG to reset the
                 * macs, so instead use the internal MAC resets
                if (!EFX_IS10G(efx)) {
                        EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1);
                        falcon_write(efx, &reg, GM_CFG1_REG);
                        EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0);
                        falcon_write(efx, &reg, GM_CFG1_REG);
                        EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
                        falcon_write(efx, &reg, XM_GLB_CFG_REG);
                        for (count = 0; count < 10000; count++) {
                                falcon_read(efx, &reg, XM_GLB_CFG_REG);
                                if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
                        EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
        /* MAC stats will fail whilst the TX fifo is draining. Serialise
         * the drain sequence with the statistics fetch */
        efx_stats_disable(efx);
        falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
        EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
        falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
        falcon_read(efx, &reg, GLB_CTL_REG_KER);
        EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1);
        EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1);
        EFX_SET_OWORD_FIELD(reg, RST_EM, 1);
        falcon_write(efx, &reg, GLB_CTL_REG_KER);
                falcon_read(efx, &reg, GLB_CTL_REG_KER);
                if (!EFX_OWORD_FIELD(reg, RST_XGTX) &&
                    !EFX_OWORD_FIELD(reg, RST_XGRX) &&
                    !EFX_OWORD_FIELD(reg, RST_EM)) {
                        EFX_LOG(efx, "Completed MAC reset after %d loops\n",
        EFX_ERR(efx, "MAC reset failed\n");
        efx_stats_enable(efx);
        /* If we've reset the EM block and the link is up, then
         * we'll have to kick the XAUI link so the PHY can recover */
        if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
                falcon_reset_xaui(efx);
void falcon_drain_tx_fifo(struct efx_nic *efx)
        if ((falcon_rev(efx) < FALCON_REV_B0) ||
            (efx->loopback_mode != LOOPBACK_NONE))
        falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
        /* There is no point in draining more than once */
        if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0))
        falcon_reset_macs(efx);
void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
        if (falcon_rev(efx) < FALCON_REV_B0)
        /* Isolate the MAC -> RX */
        falcon_read(efx, &reg, RX_CFG_REG_KER);
        EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0);
        falcon_write(efx, &reg, RX_CFG_REG_KER);
        falcon_drain_tx_fifo(efx);
void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
        switch (efx->link_speed) {
        case 10000: link_speed = 3; break;
        case 1000:  link_speed = 2; break;
        case 100:   link_speed = 1; break;
        default:    link_speed = 0; break;
        /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
         * as advertised.  Disable to ensure packets are not
         * indefinitely held and TX queue can be flushed at any point
         * while the link is down. */
        EFX_POPULATE_OWORD_5(reg,
                             MAC_XOFF_VAL, 0xffff /* max pause time */,
                             MAC_UC_PROM, efx->promiscuous,
                             MAC_LINK_STATUS, 1, /* always set */
                             MAC_SPEED, link_speed);
        /* On B0, MAC backpressure can be disabled and packets get discarded. */
        if (falcon_rev(efx) >= FALCON_REV_B0) {
                EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
        falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
        /* Restore the multicast hash registers. */
        falcon_set_multicast_hash(efx);
        /* Transmission of pause frames when RX crosses the threshold is
         * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
         * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
        tx_fc = !!(efx->link_fc & EFX_FC_TX);
        falcon_read(efx, &reg, RX_CFG_REG_KER);
        EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
        /* Unisolate the MAC -> RX */
        if (falcon_rev(efx) >= FALCON_REV_B0)
                EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
        falcon_write(efx, &reg, RX_CFG_REG_KER);
int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
        if (disable_dma_stats)
        /* Statistics fetch will fail if the MAC is in TX drain */
        if (falcon_rev(efx) >= FALCON_REV_B0) {
                falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
                if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
        dma_done = (efx->stats_buffer.addr + done_offset);
        *dma_done = FALCON_STATS_NOT_DONE;
        wmb(); /* ensure done flag is clear */
        /* Initiate DMA transfer of stats */
        EFX_POPULATE_OWORD_2(reg,
                             MAC_STAT_DMA_CMD, 1,
                             efx->stats_buffer.dma_addr);
        falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER);
        /* Wait for transfer to complete */
        for (i = 0; i < 400; i++) {
                if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
                        rmb(); /* Ensure the stats are valid. */
        EFX_ERR(efx, "timed out waiting for statistics\n");
/**************************************************************************
 * PHY access via GMII
 **************************************************************************
/* Use the top bit of the MII PHY id to indicate the PHY type
 * (1G/10G), with the remaining bits as the actual PHY id.
 * This allows us to avoid leaking information from the mii_if_info
 * structure into other data structures.
#define FALCON_PHY_ID_ID_WIDTH  EFX_WIDTH(MD_PRT_DEV_ADR)
#define FALCON_PHY_ID_ID_MASK   ((1 << FALCON_PHY_ID_ID_WIDTH) - 1)
#define FALCON_PHY_ID_WIDTH     (FALCON_PHY_ID_ID_WIDTH + 1)
#define FALCON_PHY_ID_MASK      ((1 << FALCON_PHY_ID_WIDTH) - 1)
#define FALCON_PHY_ID_10G       (1 << (FALCON_PHY_ID_WIDTH - 1))
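/* For example, assuming MD_PRT_DEV_ADR packs a 5-bit port and a 5-bit
 * device (MMD) field: a clause 45 PHY at port 4, MMD 1 would be
 * addressed as FALCON_PHY_ID_10G | mdio_clause45_pack(4, 1), while a
 * clause 22 (1G) PHY just uses its station address with the top bit
 * clear (see falcon_mdio_write()/falcon_mdio_read() below).
 */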
/* Packing the clause 45 port and device fields into a single value */
#define MD_PRT_ADR_COMP_LBN    (MD_PRT_ADR_LBN - MD_DEV_ADR_LBN)
#define MD_PRT_ADR_COMP_WIDTH  MD_PRT_ADR_WIDTH
#define MD_DEV_ADR_COMP_LBN    0
#define MD_DEV_ADR_COMP_WIDTH  MD_DEV_ADR_WIDTH
/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct efx_nic *efx)
        efx_dword_t md_stat;
        /* wait up to 50 ms - maximum taken from the datasheet */
        for (count = 0; count < 5000; count++) {
                falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
                if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
                        if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
                            EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) {
                                EFX_ERR(efx, "error from GMII access "
                                        EFX_DWORD_VAL(md_stat));
        EFX_ERR(efx, "timed out waiting for GMII\n");
/* Writes a GMII register of a PHY connected to Falcon using MDIO. */
static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
			      int addr, int value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
	efx_oword_t reg;

	/* The 'generic' prt/dev packing in mdio_10g.h is conveniently
	 * chosen so that the only current user, Falcon, can take the
	 * packed value and use it directly.
	 * Fail to build if this assumption is broken.
	 */
	BUILD_BUG_ON(FALCON_PHY_ID_10G != MDIO45_XPRT_ID_IS10G);
	BUILD_BUG_ON(FALCON_PHY_ID_ID_WIDTH != MDIO45_PRT_DEV_WIDTH);
	BUILD_BUG_ON(MD_PRT_ADR_COMP_LBN != MDIO45_PRT_ID_COMP_LBN);
	BUILD_BUG_ON(MD_DEV_ADR_COMP_LBN != MDIO45_DEV_ID_COMP_LBN);

	if (phy_id2 == PHY_ADDR_INVALID)
		return;

	/* See falcon_mdio_read for an explanation. */
	if (!(phy_id & FALCON_PHY_ID_10G)) {
		int mmd = ffs(efx->phy_op->mmds) - 1;
		EFX_TRACE(efx, "Fixing erroneous clause22 write\n");
		phy_id2 = mdio_clause45_pack(phy_id2, mmd)
			& FALCON_PHY_ID_ID_MASK;
	}

	EFX_REGDUMP(efx, "writing GMII %d register %02x with %04x\n", phy_id,
		    addr, value);

	spin_lock_bh(&efx->phy_lock);

	/* Check MII not currently being accessed */
	if (falcon_gmii_wait(efx) != 0)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
	falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);

	EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_id2);
	falcon_write(efx, &reg, MD_ID_REG_KER);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
	falcon_write(efx, &reg, MD_TXD_REG_KER);

	EFX_POPULATE_OWORD_2(reg,
			     MD_WRC, 1,
			     MD_GC, 0);
	falcon_write(efx, &reg, MD_CS_REG_KER);

	/* Wait for data to be written */
	if (falcon_gmii_wait(efx) != 0) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     MD_WRC, 0,
				     MD_GC, 1);
		falcon_write(efx, &reg, MD_CS_REG_KER);
		udelay(10);
	}

 out:
	spin_unlock_bh(&efx->phy_lock);
}
/* Reads a GMII register from a PHY connected to Falcon.  If no value
 * could be read, -1 will be returned. */
static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
	efx_oword_t reg;
	int value = -1;

	if (phy_addr == PHY_ADDR_INVALID)
		return -1;

	/* Our PHY code knows whether it needs to talk clause 22(1G) or 45(10G)
	 * but the generic Linux code does not make any distinction or have
	 * any state for this.
	 * We spot the case where someone tried to talk 22 to a 45 PHY and
	 * redirect the request to the lowest numbered MMD as a clause45
	 * request.  This is enough to allow simple queries like id and link
	 * state to succeed.  TODO: We may need to do more in future.
	 */
	if (!(phy_id & FALCON_PHY_ID_10G)) {
		int mmd = ffs(efx->phy_op->mmds) - 1;
		EFX_TRACE(efx, "Fixing erroneous clause22 read\n");
		phy_addr = mdio_clause45_pack(phy_addr, mmd)
			& FALCON_PHY_ID_ID_MASK;
	}

	spin_lock_bh(&efx->phy_lock);

	/* Check MII not currently being accessed */
	if (falcon_gmii_wait(efx) != 0)
		goto out;

	EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
	falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);

	EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_addr);
	falcon_write(efx, &reg, MD_ID_REG_KER);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0);
	falcon_write(efx, &reg, MD_CS_REG_KER);

	/* Wait for data to become available */
	value = falcon_gmii_wait(efx);
	if (value == 0) {
		falcon_read(efx, &reg, MD_RXD_REG_KER);
		value = EFX_OWORD_FIELD(reg, MD_RXD);
		EFX_REGDUMP(efx, "read from GMII %d register %02x, got %04x\n",
			    phy_id, addr, value);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg,
				     MD_RIC, 0,
				     MD_GC, 1);
		falcon_write(efx, &reg, MD_CS_REG_KER);

		EFX_LOG(efx, "read from GMII 0x%x register %02x, got "
			"error %d\n", phy_id, addr, value);
	}

 out:
	spin_unlock_bh(&efx->phy_lock);

	return value;
}
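/* A note on the redirection above, since it is easy to misread:
 * ffs(efx->phy_op->mmds) - 1 converts the MMD bitmask into the number
 * of its lowest set bit, so (for example, assuming the PMA/PMD MMD,
 * bit 1, is the lowest one advertised) a clause 22 request for the PHY
 * is re-issued as a clause 45 request to MMD 1 on the same port. */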
static void falcon_init_mdio(struct mii_if_info *gmii)
{
	gmii->mdio_read = falcon_mdio_read;
	gmii->mdio_write = falcon_mdio_write;
	gmii->phy_id_mask = FALCON_PHY_ID_MASK;
	gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_PHY_ADR)) - 1);
}
static int falcon_probe_phy(struct efx_nic *efx)
{
	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_SFT9001A:
	case PHY_TYPE_SFT9001B:
		efx->phy_op = &falcon_sft9001_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_xfp_phy_ops;
		break;
	default:
		EFX_ERR(efx, "Unknown PHY type %d\n",
			efx->phy_type);
		return -1;
	}

	if (efx->phy_op->macs & EFX_XMAC)
		efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
					(1 << LOOPBACK_XGXS) |
					(1 << LOOPBACK_XAUI));
	if (efx->phy_op->macs & EFX_GMAC)
		efx->loopback_modes |= (1 << LOOPBACK_GMAC);
	efx->loopback_modes |= efx->phy_op->loopbacks;

	return 0;
}
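/* loopback_modes is a plain bitmask with one bit per loopback mode; as
 * a sketch, a 10G PHY driven through the XMAC ends up with something
 * like
 *
 *	loopback_modes = (1 << LOOPBACK_XGMII) | (1 << LOOPBACK_XGXS)
 *			 | (1 << LOOPBACK_XAUI) | phy_internal_modes;
 *
 * so self-test code can probe each supported point in the datapath by
 * testing single bits. */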
int falcon_switch_mac(struct efx_nic *efx)
{
	struct efx_mac_operations *old_mac_op = efx->mac_op;
	efx_oword_t nic_stat;
	unsigned strap_val;
	int rc = 0;

	/* Don't try to fetch MAC stats while we're switching MACs */
	efx_stats_disable(efx);

	/* Internal loopbacks override the phy speed setting */
	if (efx->loopback_mode == LOOPBACK_GMAC) {
		efx->link_speed = 1000;
		efx->link_fd = true;
	} else if (LOOPBACK_INTERNAL(efx)) {
		efx->link_speed = 10000;
		efx->link_fd = true;
	}

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	efx->mac_op = (EFX_IS10G(efx) ?
		       &falcon_xmac_operations : &falcon_gmac_operations);

	/* Always push the NIC_STAT_REG setting even if the mac hasn't
	 * changed, because this function is run post online reset */
	falcon_read(efx, &nic_stat, NIC_STAT_REG);
	strap_val = EFX_IS10G(efx) ? 5 : 3;
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1);
		EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val);
		falcon_write(efx, &nic_stat, NIC_STAT_REG);
	} else {
		/* Falcon A1 does not support 1G/10G speed switching
		 * and must not be used with a PHY that does. */
		BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
	}

	if (old_mac_op == efx->mac_op)
		goto out;

	EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
	/* Not all macs support a mac-level link state */
	efx->mac_up = false;

	rc = falcon_reset_macs(efx);
 out:
	efx_stats_enable(efx);
	return rc;
}
/* This call is responsible for hooking in the MAC and PHY operations */
int falcon_probe_port(struct efx_nic *efx)
{
	int rc;

	/* Hook in PHY operations table */
	rc = falcon_probe_phy(efx);
	if (rc)
		return rc;

	/* Set up GMII structure for PHY */
	efx->mii.supports_gmii = true;
	falcon_init_mdio(&efx->mii);

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	else
		efx->wanted_fc = EFX_FC_RX;

	/* Allocate buffer for stats */
	rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
				 FALCON_MAC_STATS_SIZE);
	if (rc)
		return rc;
	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
		(unsigned long long)efx->stats_buffer.dma_addr,
		efx->stats_buffer.addr,
		virt_to_phys(efx->stats_buffer.addr));

	return 0;
}

void falcon_remove_port(struct efx_nic *efx)
{
	falcon_free_buffer(efx, &efx->stats_buffer);
}
/**************************************************************************
 *
 * Multicast filtering
 *
 **************************************************************************
 */

void falcon_set_multicast_hash(struct efx_nic *efx)
{
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;

	/* Broadcast packets go through the multicast hash filter.
	 * ether_crc_le() of the broadcast address is 0xbe2612ff
	 * so we always add bit 0xff to the mask.
	 */
	set_bit_le(0xff, mc_hash->byte);

	falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
	falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
}
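/* For reference, the hash bit for any address is derived the same way
 * the comment above derives 0xff for broadcast; a sketch:
 *
 *	u32 crc = ether_crc_le(ETH_ALEN, addr);
 *	set_bit_le(crc & 0xff, mc_hash->byte);	// low 8 bits index 256 bits
 *
 * 0xbe2612ff & 0xff == 0xff, hence the hard-wired broadcast bit. */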
/**************************************************************************
 *
 * Falcon test code
 *
 **************************************************************************/

int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
	if (!spi)
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + NVCONFIG_OFFSET;

	mutex_lock(&efx->spi_lock);
	rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&efx->spi_lock);
	if (rc) {
		EFX_ERR(efx, "Failed to read %s\n",
			efx->spi_flash ? "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) {
		EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	if (~csum & 0xffff) {
		EFX_ERR(efx, "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}
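/* The checksum convention used above: every 16-bit word of the image,
 * including the stored checksum word itself, must sum to 0xffff modulo
 * 2^16.  A quick worked case: if the payload words sum to 0x1234, the
 * stored checksum word is 0xffff - 0x1234 = 0xedcb, the loop then
 * yields 0xffff, and `~csum & 0xffff` is 0, so the image passes. */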
/* Registers tested in the falcon register test */
static struct {
	unsigned address;
	efx_oword_t mask;
} efx_test_registers[] = {
	{ ADR_REGION_REG_KER,
	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
	{ RX_CFG_REG_KER,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ TX_CFG_REG_KER,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ TX_CFG2_REG_KER,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ MAC0_CTRL_REG_KER,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ SRM_TX_DC_CFG_REG_KER,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ RX_DC_CFG_REG_KER,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ RX_DC_PF_WM_REG_KER,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ DP_CTRL_REG,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ GM_CFG2_REG,
	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ GMF_CFG0_REG,
	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_GLB_CFG_REG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_TX_CFG_REG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_RX_CFG_REG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_RX_PARAM_REG,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_FC_REG,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_ADR_LO_REG,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ XX_SD_CTL_REG,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int falcon_test_registers(struct efx_nic *efx)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
		address = efx_test_registers[i].address;
		mask = imask = efx_test_registers[i].mask;
		EFX_INVERT_OWORD(imask);

		falcon_read(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			falcon_write(efx, &reg, address);
			falcon_read(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			falcon_write(efx, &reg, address);
			falcon_read(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		falcon_write(efx, &original, address);
	}

	return 0;

 fail:
	EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
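/* A short walk-through of the sweep above, since the double masking is
 * subtle: for each testable bit j, the register is first loaded with
 * (original & mask) plus bit j set, then with (original | mask) minus
 * bit j, and after each write the read-back is compared only on the
 * masked (testable) bits.  Any stuck-at-0, stuck-at-1 or coupled bit
 * inside the mask therefore shows up as a masked mismatch. */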
/**************************************************************************
 *
 * Device reset
 *
 **************************************************************************
 */

/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep. */
int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	EFX_LOG(efx, "performing hardware reset (%d)\n", method);

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to backup PCI state of primary "
				"function prior to hardware reset\n");
			goto fail1;
		}
		if (FALCON_IS_DUAL_FUNC(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to backup PCI state of "
					"secondary function prior to "
					"hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     EXT_PHY_RST_DUR, 0x7,
				     SWRST, 1);
	} else {
		int reset_phy = (method == RESET_TYPE_INVISIBLE ?
				 EXCLUDE_FROM_RESET : 0);

		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     EXT_PHY_RST_CTL, reset_phy,
				     PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
				     PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
				     PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
				     EE_RST_CTL, EXCLUDE_FROM_RESET,
				     EXT_PHY_RST_DUR, 0x7 /* 10ms */,
				     SWRST, 1);
	}
	falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);

	EFX_LOG(efx, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (FALCON_IS_DUAL_FUNC(efx)) {
			rc = pci_restore_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to restore PCI config for "
					"the secondary function\n");
				goto fail3;
			}
		}
		rc = pci_restore_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to restore PCI config for the "
				"primary function\n");
			goto fail4;
		}
		EFX_LOG(efx, "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
		rc = -ETIMEDOUT;
		EFX_ERR(efx, "timed out waiting for hardware reset\n");
		goto fail5;
	}
	EFX_LOG(efx, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
 fail2:
 fail3:
	pci_restore_state(efx->pci_dev);
 fail1:
 fail4:
 fail5:
	return rc;
}
/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1);
	falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     SRAM_OOB_BT_INIT_EN, 1,
			     SRM_NUM_BANKS_AND_BANK_SIZE, 0);
	falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
			EFX_LOG(efx, "SRAM reset complete\n");
			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	EFX_ERR(efx, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
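/* Worst-case timing for anyone tuning the loop above: 20 attempts at
 * HZ/50 jiffies each is 20 * 20ms = 0.4s of sleeping, against an
 * expected completion of roughly 16ms, i.e. a ~25x safety margin. */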
static int falcon_spi_device_init(struct efx_nic *efx,
				  struct efx_spi_device **spi_device_ret,
				  unsigned int device_id, u32 device_type)
{
	struct efx_spi_device *spi_device;

	if (device_type != 0) {
		spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
		if (!spi_device)
			return -ENOMEM;
		spi_device->device_id = device_id;
		spi_device->size =
			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
		spi_device->addr_len =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
		spi_device->munge_address = (spi_device->size == 1 << 9 &&
					     spi_device->addr_len == 1);
		spi_device->erase_command =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
		spi_device->erase_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_ERASE_SIZE);
		spi_device->block_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_BLOCK_SIZE);

		spi_device->efx = efx;
	} else {
		spi_device = NULL;
	}

	kfree(*spi_device_ret);
	*spi_device_ret = spi_device;
	return 0;
}
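/* To make the packed device_type concrete: each property is stored as
 * a small field at a fixed LBN, and sizes are log2-encoded, so e.g. a
 * hypothetical type word carrying SPI_DEV_TYPE_SIZE = 20 and
 * SPI_DEV_TYPE_BLOCK_SIZE = 9 would decode above as
 *
 *	size       = 1 << 20;	// a 1 MB part
 *	block_size = 1 << 9;	// written in 512 B blocks
 */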
static void falcon_remove_spi_devices(struct efx_nic *efx)
{
	kfree(efx->spi_eeprom);
	efx->spi_eeprom = NULL;
	kfree(efx->spi_flash);
	efx->spi_flash = NULL;
}
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nvconfig *nvconfig;
	int board_rev;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc == -EINVAL) {
		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
		efx->phy_type = PHY_TYPE_NONE;
		efx->mii.phy_id = PHY_ADDR_INVALID;
		board_rev = 0;
		rc = 0;
	} else if (rc) {
		goto fail1;
	} else {
		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
		struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;

		efx->phy_type = v2->port0_phy_type;
		efx->mii.phy_id = v2->port0_phy_addr;
		board_rev = le16_to_cpu(v2->board_revision);

		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
			__le32 fl = v3->spi_device_type[EE_SPI_FLASH];
			__le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
			rc = falcon_spi_device_init(efx, &efx->spi_flash,
						    EE_SPI_FLASH,
						    le32_to_cpu(fl));
			if (rc)
				goto fail2;
			rc = falcon_spi_device_init(efx, &efx->spi_eeprom,
						    EE_SPI_EEPROM,
						    le32_to_cpu(ee));
			if (rc)
				goto fail2;
		}
	}

	/* Read the MAC addresses */
	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);

	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);

	efx_set_board_info(efx, board_rev);

	kfree(nvconfig);
	return 0;

 fail2:
	falcon_remove_spi_devices(efx);
 fail1:
	kfree(nvconfig);
	return rc;
}
/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
 * count, port speed).  Set workaround and feature flags accordingly.
 */
static int falcon_probe_nic_variant(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_oword_t nic_stat;

	falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
	if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
		EFX_ERR(efx, "Falcon FPGA not supported\n");
		return -ENODEV;
	}

	falcon_read(efx, &nic_stat, NIC_STAT_REG);

	switch (falcon_rev(efx)) {
	case FALCON_REV_A0:
	case 0xff:
		EFX_ERR(efx, "Falcon rev A0 not supported\n");
		return -ENODEV;

	case FALCON_REV_A1:
		if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
			return -ENODEV;
		}
		break;

	case FALCON_REV_B0:
		break;

	default:
		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
		return -ENODEV;
	}

	/* Initial assumed speed */
	efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000;

	return 0;
}
/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
	falcon_read(efx, &nic_stat, NIC_STAT_REG);
	falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);

	if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) {
		boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ?
			    EE_SPI_FLASH : EE_SPI_EEPROM);
		EFX_LOG(efx, "Booted from %s\n",
			boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		EFX_LOG(efx, "Booted from internal ASIC settings;"
			" setting SPI config\n");
		EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     EE_EE_CLOCK_DIV, 63);
		falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
	}

	if (boot_dev == EE_SPI_FLASH)
		falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH,
				       default_flash_type);
	if (boot_dev == EE_SPI_EEPROM)
		falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM,
				       large_eeprom_type);
}
int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* Determine number of ports etc. */
	rc = falcon_probe_nic_variant(efx);
	if (rc)
		goto fail1;

	/* Probe secondary function if expected */
	if (FALCON_IS_DUAL_FUNC(efx)) {
		struct pci_dev *dev = pci_dev_get(efx->pci_dev);

		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			EFX_ERR(efx, "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		EFX_ERR(efx, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
		(unsigned long long)efx->irq_status.dma_addr,
		efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc)
		goto fail5;

	/* Initialise I2C adapter */
	efx->i2c_adap.owner = THIS_MODULE;
	nic_data->i2c_data = falcon_i2c_bit_operations;
	nic_data->i2c_data.data = efx;
	efx->i2c_adap.algo_data = &nic_data->i2c_data;
	efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
	rc = i2c_bit_add_bus(&efx->i2c_adap);
	if (rc)
		goto fail5;

	return 0;

 fail5:
	falcon_remove_spi_devices(efx);
	falcon_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	unsigned thresh;
	int rc;

	/* Use on-chip SRAM */
	falcon_read(efx, &temp, NIC_STAT_REG);
	EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
	falcon_write(efx, &temp, NIC_STAT_REG);

	/* Set the source of the GMAC clock */
	if (falcon_rev(efx) == FALCON_REV_B0) {
		falcon_read(efx, &temp, GPIO_CTL_REG_KER);
		EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true);
		falcon_write(efx, &temp, GPIO_CTL_REG_KER);
	}

	/* Set buffer table mode */
	EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
	falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
	falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
	EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
	falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	falcon_write(efx, &temp, TX_DC_CFG_REG_KER);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
	EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);
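	/* Arithmetic check, for readers tuning these values: with
	 * RX_DC_ENTRIES = 64 (order 2, since 16 << 2 == 64) the low
	 * watermark written above is 64 - 8 = 56 descriptors, per the
	 * "size minus 8" rule in the comment. */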
	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		falcon_read(efx, &temp, SPARE_REG_KER);
		EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
		falcon_write(efx, &temp, SPARE_REG_KER);
	}

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     ILL_ADR_INT_KER_EN, 1,
			     RBUF_OWN_INT_KER_EN, 1,
			     TBUF_OWN_INT_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	falcon_write(efx, &temp, FATAL_INTR_REG_KER);

	if (EFX_WORKAROUND_7244(efx)) {
		falcon_read(efx, &temp, RX_FILTER_CTL_REG);
		EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
		falcon_write(efx, &temp, RX_FILTER_CTL_REG);
	}

	falcon_setup_rss_indir_table(efx);

	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
	EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
	if (EFX_WORKAROUND_5583(efx))
		EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
	falcon_write(efx, &temp, RX_SELF_RST_REG_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	falcon_read(efx, &temp, TX_CFG2_REG_KER);
	EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
	/* Squash TX of packets of 16 bytes or less */
	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
		EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
	falcon_write(efx, &temp, TX_CFG2_REG_KER);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	falcon_read(efx, &temp, TX_CFG_REG_KER);
	EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
	falcon_write(efx, &temp, TX_CFG_REG_KER);

	falcon_read(efx, &temp, RX_CFG_REG_KER);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
	if (EFX_WORKAROUND_7575(efx))
		EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
					(3 * 4096) / 32);
	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);

	/* RX FIFO flow control thresholds */
	thresh = ((rx_xon_thresh_bytes >= 0) ?
		  rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
	thresh = ((rx_xoff_thresh_bytes >= 0) ?
		  rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
	/* RX control FIFO thresholds [32 entries] */
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
	falcon_write(efx, &temp, RX_CFG_REG_KER);
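	/* Units reminder (descriptive only): RX_XON_MAC_TH and
	 * RX_XOFF_MAC_TH are programmed in 256-byte steps, hence the
	 * "thresh / 256" above; e.g. a byte threshold of 54272 becomes
	 * 54272 / 256 = 212 register units.  The RX_XON_TX_TH and
	 * RX_XOFF_TX_TH values just above are in control FIFO entries. */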
	/* Set destination of both TX and RX Flush events */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
		falcon_write(efx, &temp, DP_CTRL_REG);
	}

	return 0;
}
void falcon_remove_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	/* Remove I2C adapter and clear it in preparation for a retry */
	rc = i2c_del_adapter(&efx->i2c_adap);
	BUG_ON(rc);
	memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap));

	falcon_remove_spi_devices(efx);
	falcon_free_buffer(efx, &efx->irq_status);

	falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
void falcon_update_nic_stats(struct efx_nic *efx)
{
	efx_oword_t cnt;

	falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER);
	efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT);
}
/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c
 *
 **************************************************************************
 */

struct efx_nic_type falcon_a_nic_type = {
	.mem_bar = 2,
	.mem_map_size = 0x20000,
	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
	.rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
	.buf_tbl_base = BUF_TBL_KER_A1,
	.evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
	.evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
	.txd_ring_mask = FALCON_TXD_RING_MASK,
	.rxd_ring_mask = FALCON_RXD_RING_MASK,
	.evq_size = FALCON_EVQ_SIZE,
	.max_dma_mask = FALCON_DMA_MASK,
	.tx_dma_mask = FALCON_TX_DMA_MASK,
	.bug5391_mask = 0xf,
	.rx_xoff_thresh = 2048,
	.rx_xon_thresh = 512,
	.rx_buffer_padding = 0x24,
	.max_interrupt_mode = EFX_INT_MODE_MSI,
	.phys_addr_channels = 4,
};
struct efx_nic_type falcon_b_nic_type = {
	.mem_bar = 2,
	/* Map everything up to and including the RSS indirection
	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
	 * requires that they not be mapped. */
	.mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
	.rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
	.buf_tbl_base = BUF_TBL_KER_B0,
	.evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
	.evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
	.txd_ring_mask = FALCON_TXD_RING_MASK,
	.rxd_ring_mask = FALCON_RXD_RING_MASK,
	.evq_size = FALCON_EVQ_SIZE,
	.max_dma_mask = FALCON_DMA_MASK,
	.tx_dma_mask = FALCON_TX_DMA_MASK,
	.bug5391_mask = 0,
	.rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
	.rx_xon_thresh = 27648, /* ~3*max MTU */
	.rx_buffer_padding = 0,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
};