/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff

/* The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

struct efx_ef10_filter_table {
/* The RX match field masks supported by this fw & hw, in order of priority */
	enum efx_filter_match_flags rx_match_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
	unsigned int rx_match_count;

	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress.  STACK_OLD is
 * used to mark and sweep stack-owned MAC filters.
 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_STACK_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
#define EFX_EF10_FILTER_STACK_UC_MAX	32
#define EFX_EF10_FILTER_STACK_MC_MAX	256
	struct {
		u8 addr[ETH_ALEN];
		u16 id;
	} stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
	  stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
	int stack_uc_count;		/* negative for PROMISC */
	int stack_mc_count;		/* negative for PROMISC/ALLMULTI */
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
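
/* The MC exposes its warm boot count through a BIU status register;
 * the count in EFX_WORD_0 is only meaningful while the magic value
 * 0xb007 is present in EFX_WORD_1, otherwise the MC is still booting.
 */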
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
}

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf)) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
		netif_err(efx, drv, efx->net_dev,
			  "current firmware does not support TSO\n");
		return -ENODEV;
	}

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	return 0;
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	memcpy(mac_address,
	       MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
	return 0;
}

static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	/* We can have one VI for each 8K region.  However we need
	 * multiple TX queues per channel.
	 */
	efx->max_channels =
		min_t(unsigned int,
		      EFX_MAX_CHANNELS,
		      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	BUG_ON(efx->max_channels == 0);

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail3;

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail3;
	efx->port_num = rc;

	rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail3;

	rc = efx_ef10_get_sysclk_freq(efx);
	if (rc < 0)
		goto fail3;
	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */

	/* Check whether firmware supports bug 35388 workaround */
	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
	if (rc == 0)
		nic_data->workaround_35388 = true;
	else if (rc != -ENOSYS && rc != -ENOENT)
		goto fail3;
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");

	rc = efx_mcdi_mon_probe(efx);
	if (rc)
		goto fail3;

	return 0;

fail3:
	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	return rc;
}

#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			break;
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}
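
/* PIO buffers are regions of NIC memory that the TX path can write
 * packets into directly through the write-combining BAR mapping,
 * avoiding a descriptor DMA fetch for small packets.  The buffers
 * allocated above are linked both to the extra VIs reserved for the
 * WC mapping and to the individual TX queues below.
 */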
static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf,
			 max(MC_CMD_LINK_PIOBUF_IN_LEN,
			     MC_CMD_UNLINK_PIOBUF_IN_LEN));
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / ER_DZ_TX_PIOBUF_SIZE;
			offset = offset % ER_DZ_TX_PIOBUF_SIZE;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * EFX_VI_PAGE_SIZE + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	efx_mcdi_mon_remove(efx);

	/* This needs to be after efx_ptp_remove_channel() with no filters */
	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 */
	uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      EFX_VI_PAGE_SIZE) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = min_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}
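
/* An MC reboot invalidates all prior allocations; the must_* flags set
 * by efx_ef10_mcdi_poll_reboot() tell efx_ef10_init_nic() below which
 * resources (VIs, PIO buffers, datapath capabilities) to re-establish.
 */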
static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal */
		if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	efx_ef10_rx_push_indir_table(efx);
	return 0;
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }

static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(tx_bytes, TX_BYTES),
	EF10_DMA_STAT(tx_packets, TX_PKTS),
	EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(tx_64, TX_64_PKTS),
	EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(rx_good_bytes),
	EF10_OTHER_STAT(rx_bad_bytes),
	EF10_DMA_STAT(rx_packets, RX_PKTS),
	EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(rx_64, RX_64_PKTS),
	EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
	EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
	EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
};

#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
			       (1ULL << EF10_STAT_tx_packets) |		\
			       (1ULL << EF10_STAT_tx_pause) |		\
			       (1ULL << EF10_STAT_tx_unicast) |		\
			       (1ULL << EF10_STAT_tx_multicast) |	\
			       (1ULL << EF10_STAT_tx_broadcast) |	\
			       (1ULL << EF10_STAT_rx_bytes) |		\
			       (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_rx_packets) |		\
			       (1ULL << EF10_STAT_rx_good) |		\
			       (1ULL << EF10_STAT_rx_bad) |		\
			       (1ULL << EF10_STAT_rx_pause) |		\
			       (1ULL << EF10_STAT_rx_control) |		\
			       (1ULL << EF10_STAT_rx_unicast) |		\
			       (1ULL << EF10_STAT_rx_multicast) |	\
			       (1ULL << EF10_STAT_rx_broadcast) |	\
			       (1ULL << EF10_STAT_rx_lt64) |		\
			       (1ULL << EF10_STAT_rx_64) |		\
			       (1ULL << EF10_STAT_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_rx_512_to_1023) |	\
			       (1ULL << EF10_STAT_rx_1024_to_15xx) |	\
			       (1ULL << EF10_STAT_rx_15xx_to_jumbo) |	\
			       (1ULL << EF10_STAT_rx_gtjumbo) |		\
			       (1ULL << EF10_STAT_rx_bad_gtjumbo) |	\
			       (1ULL << EF10_STAT_rx_overflow) |	\
			       (1ULL << EF10_STAT_rx_nodesc_drops))

/* These statistics are only provided by the 10G MAC.  For a 10G/40G
 * switchable port we do not expose these because they might not
 * include all the packets they should.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) |	\
				 (1ULL << EF10_STAT_tx_lt64) |		\
				 (1ULL << EF10_STAT_tx_64) |		\
				 (1ULL << EF10_STAT_tx_65_to_127) |	\
				 (1ULL << EF10_STAT_tx_128_to_255) |	\
				 (1ULL << EF10_STAT_tx_256_to_511) |	\
				 (1ULL << EF10_STAT_tx_512_to_1023) |	\
				 (1ULL << EF10_STAT_tx_1024_to_15xx) |	\
				 (1ULL << EF10_STAT_tx_15xx_to_jumbo))

/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * not be reported.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |	\
				  (1ULL << EF10_STAT_rx_length_error))

/* These statistics are only provided if the firmware supports the
 * capability PM_AND_RXDP_COUNTERS.
 */
#define HUNT_PM_AND_RXDP_STAT_MASK (					\
	(1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |			\
	(1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |			\
	(1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |			\
	(1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |			\
	(1ULL << EF10_STAT_rx_pm_trunc_qbb) |				\
	(1ULL << EF10_STAT_rx_pm_discard_qbb) |				\
	(1ULL << EF10_STAT_rx_pm_discard_mapping) |			\
	(1ULL << EF10_STAT_rx_dp_q_disabled_packets) |			\
	(1ULL << EF10_STAT_rx_dp_di_dropped_packets) |			\
	(1ULL << EF10_STAT_rx_dp_streaming_packets) |			\
	(1ULL << EF10_STAT_rx_dp_emerg_fetch) |				\
	(1ULL << EF10_STAT_rx_dp_emerg_wait))

static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
	u64 raw_mask = HUNT_COMMON_STAT_MASK;
	u32 port_caps = efx_mcdi_phy_get_caps(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
	else
		raw_mask |= HUNT_10G_ONLY_STAT_MASK;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;

	return raw_mask;
}

static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
	u64 raw_mask = efx_ef10_raw_stat_mask(efx);

#if BITS_PER_LONG == 64
	mask[0] = raw_mask;
#else
	mask[0] = raw_mask & 0xffffffff;
	mask[1] = raw_mask >> 32;
#endif
}

static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);

	efx_ef10_get_stat_mask(efx, mask);
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
				      mask, names);
}
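
/* The MC bumps a generation count before and after rewriting the DMA
 * statistics buffer.  If the two values read below straddle an update,
 * the copy is inconsistent and -EAGAIN tells the caller to retry.
 */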
static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	__le64 *dma_stats;

	efx_ef10_get_stat_mask(efx, mask);

	dma_stats = efx->stats_buffer.addr;

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, efx->stats_buffer.addr, false);
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;

	/* Update derived statistics */
	stats[EF10_STAT_rx_good_bytes] =
		stats[EF10_STAT_rx_bytes] -
		stats[EF10_STAT_rx_bytes_minus_good_bytes];
	efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
			     stats[EF10_STAT_rx_bytes_minus_good_bytes]);

	return 0;
}

static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
				    struct rtnl_link_stats64 *core_stats)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;
	int retry;

	efx_ef10_get_stat_mask(efx, mask);

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		if (efx_ef10_try_update_nic_stats(efx) == 0)
			break;
		udelay(100);
	}

	if (full_stats) {
		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
			if (efx_ef10_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (core_stats) {
		core_stats->rx_packets = stats[EF10_STAT_rx_packets];
		core_stats->tx_packets = stats[EF10_STAT_tx_packets];
		core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
		core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_length_errors =
			stats[EF10_STAT_rx_gtjumbo] +
			stats[EF10_STAT_rx_length_error];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors);
	}

	return stats_count;
}
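
/* Interrupt moderation is programmed through the event queue timer.
 * With the bug 35388 workaround enabled, the timer must be written via
 * the indirect ER_DD_EVQ_INDIRECT page rather than ER_DZ_EVQ_TMR.
 */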
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned int mode, value;
	efx_dword_t timer_cmd;

	if (channel->irq_moderation) {
		mode = 3;
		value = channel->irq_moderation - 1;
	} else {
		mode = 0;
		value = 0;
	}

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
				     EFE_DD_EVQ_IND_TIMER_FLAGS,
				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
				     ERF_DD_EVQ_IND_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
				     ERF_DZ_TC_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
				channel->channel);
	}
}

static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}

static void efx_ef10_mcdi_request(struct efx_nic *efx,
				  const efx_dword_t *hdr, size_t hdr_len,
				  const efx_dword_t *sdu, size_t sdu_len)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware.  However the dwords are swapped by firmware.  The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
		    ER_DZ_MC_DB_LWRD);
	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
		    ER_DZ_MC_DB_HWRD);
}

static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;

	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}

static void
efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
			    size_t offset, size_t outlen)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(outbuf, pdu + offset, outlen);
}

static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_ef10_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting.  However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	nic_data->warm_boot_count = rc;

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* The datapath firmware might have been changed */
	nic_data->must_check_datapath_caps = true;

	/* MAC statistics have been cleared on the NIC; clear the local
	 * statistic that we update with efx_update_diff_stat().
	 */
	nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;

	return -EIO;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);

	if (queues == 0)
		return IRQ_NONE;

	if (likely(soft_enabled)) {
		/* Note test interrupts */
		if (queues & (1U << efx->irq_level))
			efx->last_irq_cpu = raw_smp_processor_id();

		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
	}

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return IRQ_HANDLED;
}

static void efx_ef10_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	(void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}
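
/* TX, RX and event rings are each allocated as one DMA-coherent buffer
 * and described to the firmware as a list of EFX_BUF_SIZE chunks in
 * the corresponding MC_CMD_INIT_*Q request.
 */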
static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

/* This writes to the TX_DESC_WPTR and also pushes data */
static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
					 const efx_qword_t *txd)
{
	unsigned int write_ptr;
	efx_oword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD, tx_queue->queue);
}

static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	efx_qword_t *txd;
	int rc;
	int i;

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
			      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
			      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* A previous user of this TX queue might have set us up the
	 * bomb by writing a descriptor to the TX push collector but
	 * not the doorbell.  (Each collector belongs to a port, not a
	 * queue or function, so cannot easily be reset.)  We must
	 * attempt to push a no-op descriptor in its place.
	 */
	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
	tx_queue->insert_count = 1;
	txd = efx_tx_desc(tx_queue, 0);
	EFX_POPULATE_QWORD_4(*txd,
			     ESF_DZ_TX_DESC_IS_OPT, true,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
	tx_queue->write_count = 1;
	wmb();
	efx_ef10_push_tx_desc(tx_queue, txd);

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}

static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;
	struct efx_tx_buffer *buffer;
	unsigned int write_ptr;
	efx_qword_t *txd;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		if (buffer->flags & EFX_TX_BUF_OPTION) {
			*txd = buffer->option;
		} else {
			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
			EFX_POPULATE_QWORD_3(
				*txd,
				ESF_DZ_TX_KER_CONT,
				buffer->flags & EFX_TX_BUF_CONT,
				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
		}
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_ef10_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_ef10_notify_tx_desc(tx_queue);
	}
}
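
/* RSS: an exclusive RSS context is allocated for this function and
 * programmed with the driver's indirection table and Toeplitz hash key
 * by the functions below.
 */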
static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
		       EFX_MAX_CHANNELS);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;

	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

	return 0;
}

static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
		       context);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	WARN_ON(rc != 0);
}

static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
	int i, rc;

	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
		MCDI_PTR(tablebuf,
			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
				(u8) efx->rx_indir_table[i];

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
			  sizeof(tablebuf), NULL, 0, NULL);
	if (rc != 0)
		return rc;

	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
	for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
			efx->rx_hash_key[i];

	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
			    sizeof(keybuf), NULL, 0, NULL);
}

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
		efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}

static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");

	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
		rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
		if (rc != 0)
			goto fail;
	}

	rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
	if (rc != 0)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
{
	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
				    (rx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = rx_queue->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc;
	int i;

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));
	MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
			      INIT_RXQ_IN_FLAG_PREFIX, 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = rx_queue->rxd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
	struct efx_nic *efx = rx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_2(*rxd,
			     ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
			     ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int write_count;
	efx_dword_t reg;

	/* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
	write_count = rx_queue->added_count & ~7;
	if (rx_queue->notified_count == write_count)
		return;

	do
		efx_ef10_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
	while (++rx_queue->notified_count != write_count);

	wmb();
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
			     write_count & rx_queue->ptr_mask);
	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
			efx_rx_queue_index(rx_queue));
}
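
/* If an RX queue is empty, a refill cannot be triggered from the
 * normal event path, so the driver injects a driver-generated event
 * through MCDI; handling that event in
 * efx_ef10_handle_driver_generated_event() refills the queue.
 */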
static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;

static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
			   inbuf, sizeof(inbuf), 0,
			   efx_ef10_rx_defer_refill_complete, 0);
}

static void
efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
				  int rc, efx_dword_t *outbuf,
				  size_t outlen_actual)
{
	/* nothing to do */
}

static int efx_ef10_ev_probe(struct efx_channel *channel)
{
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

static int efx_ef10_ev_init(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
						EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	struct efx_ef10_nic_data *nic_data;
	bool supports_rx_merge;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc;
	int i;

	nic_data = efx->nic_data;
	supports_rx_merge =
		!!(nic_data->datapath_caps &
		   1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
	/* INIT_EVQ expects index in vector table, not absolute */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
	MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
			      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
			      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
			      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
			      INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = channel->eventq.buf.dma_addr;
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* IRQ return is ignored */

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static void efx_ef10_ev_fini(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
	struct efx_nic *efx = channel->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_ev_remove(struct efx_channel *channel)
{
	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}

static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
					   unsigned int rx_queue_label)
{
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "rx event arrived on queue %d labeled as queue %u\n",
		   efx_rx_queue_index(rx_queue), rx_queue_label);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}

static void
efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
			     unsigned int actual, unsigned int expected)
{
	unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, actual, expected);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}

/* partially received RX was aborted. clean up. */
static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
	unsigned int rx_desc_ptr;

	WARN_ON(rx_queue->scatter_n == 0);

	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
		  "scattered RX aborted (dropping %u buffers)\n",
		  rx_queue->scatter_n);

	rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;

	efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
		      0, EFX_RX_PKT_DISCARD);

	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
}
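
/* RX completion events carry only the low bits of the next descriptor
 * pointer; subtracting removed_count modulo the field width recovers
 * the number of descriptors completed.  A count greater than
 * scatter_n + 1 indicates a merged completion covering several
 * single-descriptor packets.
 */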
static int efx_ef10_handle_rx_event(struct efx_channel *channel,
				    const efx_qword_t *event)
{
	unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
	unsigned int n_descs, n_packets, i;
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue;
	bool rx_cont;
	u16 flags = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	/* Basic packet information */
	rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);

	WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
		efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);

	n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));

	if (n_descs != rx_queue->scatter_n + 1) {
		/* detect rx abort */
		if (unlikely(n_descs == rx_queue->scatter_n)) {
			WARN_ON(rx_bytes != 0);
			efx_ef10_handle_rx_abort(rx_queue);
			return 0;
		}

		if (unlikely(rx_queue->scatter_n != 0)) {
			/* Scattered packet completions cannot be
			 * merged, so something has gone wrong.
			 */
			efx_ef10_handle_rx_bad_lbits(
				rx_queue, next_ptr_lbits,
				(rx_queue->removed_count +
				 rx_queue->scatter_n + 1) &
				((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
			return 0;
		}

		/* Merged completion for multiple non-scattered packets */
		rx_queue->scatter_n = 1;
		rx_queue->scatter_len = 0;
		n_packets = n_descs;
		++channel->n_rx_merge_events;
		channel->n_rx_merge_packets += n_packets;
		flags |= EFX_RX_PKT_PREFIX_LEN;
	} else {
		++rx_queue->scatter_n;
		rx_queue->scatter_len += rx_bytes;
		if (rx_cont)
			return 0;
		n_packets = 1;
	}

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
		flags |= EFX_RX_PKT_DISCARD;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
		channel->n_rx_ip_hdr_chksum_err += n_packets;
	} else if (unlikely(EFX_QWORD_FIELD(*event,
					    ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
		channel->n_rx_tcp_udp_chksum_err += n_packets;
	} else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
		   rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
		flags |= EFX_RX_PKT_CSUMMED;
	}

	if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
		flags |= EFX_RX_PKT_TCP;

	channel->irq_mod_score += 2 * n_packets;

	/* Handle received packet(s) */
	for (i = 0; i < n_packets; i++) {
		efx_rx_packet(rx_queue,
			      rx_queue->removed_count & rx_queue->ptr_mask,
			      rx_queue->scatter_n, rx_queue->scatter_len,
			      flags);
		rx_queue->removed_count += rx_queue->scatter_n;
	}

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	return n_packets;
}

static int
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue;
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	int tx_descs = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
		return 0;

	/* Transmit completion */
	tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
	tx_queue = efx_channel_get_tx_queue(channel,
					    tx_ev_q_label % EFX_TXQ_TYPES);
	tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
		    tx_queue->ptr_mask);
	efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);

	return tx_descs;
}

static void
efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int subcode;

	subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);

	switch (subcode) {
	case ESE_DZ_DRV_TIMER_EV:
	case ESE_DZ_DRV_WAKE_UP_EV:
		break;
	case ESE_DZ_DRV_START_UP_EV:
		/* event queue init complete. ok. */
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %d"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, subcode,
			  EFX_QWORD_VAL(*event));
	}
}

static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
						   efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	u32 subcode;

	subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);

	switch (subcode) {
	case EFX_EF10_TEST:
		channel->event_test_cpu = raw_smp_processor_id();
		break;
	case EFX_EF10_REFILL:
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here
		 */
		efx_fast_push_rx_descriptors(&channel->rx_queue);
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %u"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, (unsigned) subcode,
			  EFX_QWORD_VAL(*event));
	}
}

static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
{
	struct efx_nic *efx = channel->efx;
	efx_qword_t event, *p_event;
	unsigned int read_ptr;
	int ev_code;
	int tx_descs = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			break;

		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);

		netif_vdbg(efx, drv, efx->net_dev,
			   "processing event on %d " EFX_QWORD_FMT "\n",
			   channel->channel, EFX_QWORD_VAL(event));

		switch (ev_code) {
		case ESE_DZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case ESE_DZ_EV_CODE_RX_EV:
			spent += efx_ef10_handle_rx_event(channel, &event);
			if (spent >= quota) {
				/* XXX can we split a merged event to
				 * avoid going over-quota?
				 */
				spent = quota;
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_TX_EV:
			tx_descs += efx_ef10_handle_tx_event(channel, &event);
			if (tx_descs > efx->txq_entries) {
				spent = quota;
				goto out;
			} else if (++spent == quota) {
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_DRIVER_EV:
			efx_ef10_handle_driver_event(channel, &event);
			if (++spent == quota)
				goto out;
			break;
		case EFX_EF10_DRVGEN_EV:
			efx_ef10_handle_driver_generated_event(channel, &event);
			break;
		default:
			netif_err(efx, hw, efx->net_dev,
				  "channel %d unknown event type %d"
				  " (data " EFX_QWORD_FMT ")\n",
				  channel->channel, ev_code,
				  EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
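
/* With workaround 35388 the event queue read pointer is acknowledged
 * in two writes (high bits then low bits) through the indirect
 * ER_DD_EVQ_INDIRECT register; otherwise a single ER_DZ_EVQ_RPTR
 * write suffices.
 */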
static void efx_ef10_ev_read_ack(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	efx_dword_t rptr;

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
			     (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
			     (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
				     ERF_DD_EVQ_IND_RPTR,
				     (channel->eventq_read_ptr &
				      channel->eventq_mask) >>
				     ERF_DD_EVQ_IND_RPTR_WIDTH);
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
				     ERF_DD_EVQ_IND_RPTR,
				     channel->eventq_read_ptr &
				     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
				     channel->eventq_read_ptr &
				     channel->eventq_mask);
		efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
	}
}

static void efx_ef10_ev_test_generate(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	struct efx_nic *efx = channel->efx;
	efx_qword_t event;
	int rc;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_TEST);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc != 0)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

void efx_ef10_handle_drain_event(struct efx_nic *efx)
{
	if (atomic_dec_and_test(&efx->active_queues))
		wake_up(&efx->flush_wq);

	WARN_ON(atomic_read(&efx->active_queues) < 0);
}

static int efx_ef10_fini_dmaq(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int pending;

	/* If the MC has just rebooted, the TX/RX queues will have already been
	 * torn down, but efx->active_queues needs to be set to zero.
	 */
	if (nic_data->must_realloc_vis) {
		atomic_set(&efx->active_queues, 0);
		return 0;
	}

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_ef10_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_ef10_tx_fini(tx_queue);
		}

		wait_event_timeout(efx->flush_wq,
				   atomic_read(&efx->active_queues) == 0,
				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
		pending = atomic_read(&efx->active_queues);
		if (pending) {
			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
				  pending);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
				  const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
	/* XXX should we randomise the initval? */
}

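/* efx_ef10_filter_equal() and efx_ef10_filter_hash() deliberately cover
 * the same byte range of the spec (outer_vid through to the end), so two
 * specs that compare equal always hash to the same table row.  Fields
 * before outer_vid (e.g. priority, flags, dmaq_id) do not affect
 * placement; match_flags is checked separately in efx_ef10_filter_equal().
 */
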
/* Decide whether a filter should be exclusive or else should allow
 * delivery to additional recipients.  Currently we decide that
 * filters for specific local unicast MAC and IP addresses are
 * exclusive.
 */
static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
{
	if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
	    !is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    !ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] != 0xff)
			return true;
	}

	return false;
}

static struct efx_filter_spec *
efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
			   unsigned int filter_idx)
{
	return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
					  ~EFX_EF10_FILTER_FLAGS);
}

static unsigned int
efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
			    unsigned int filter_idx)
{
	return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
}

static void
efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
			  unsigned int filter_idx,
			  const struct efx_filter_spec *spec,
			  unsigned int flags)
{
	table->entry[filter_idx].spec = (unsigned long)spec | flags;
}

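/* The entry encoding above relies on the spec structures being at least
 * 4-byte aligned, so the low two bits of the pointer are free to carry
 * EFX_EF10_FILTER_FLAG_BUSY and _STACK_OLD in-line.  For example, a spec
 * at 0xffff880012345678 marked busy is stored as 0xffff880012345679;
 * masking with ~EFX_EF10_FILTER_FLAGS recovers the pointer, masking with
 * EFX_EF10_FILTER_FLAGS recovers the flags.
 */
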
static void efx_ef10_filter_push_prep(struct efx_nic *efx,
				      const struct efx_filter_spec *spec,
				      efx_dword_t *inbuf, u64 handle,
				      bool replacing)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);

	if (replacing) {
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       MC_CMD_FILTER_OP_IN_OP_REPLACE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
	} else {
		u32 match_fields = 0;

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_INSERT :
			       MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);

		/* Convert match flags and values.  Unlike almost
		 * everything else in MCDI, these fields are in
		 * network byte order.
		 */
		if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
			match_fields =
				is_multicast_ether_addr(spec->loc_mac) ?
				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
		if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
			match_fields |= \
				1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
				mcdi_field ## _LBN; \
			BUILD_BUG_ON( \
				MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
				sizeof(spec->gen_field)); \
			memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
			       &spec->gen_field, sizeof(spec->gen_field)); \
		}
		COPY_FIELD(REM_HOST, rem_host, SRC_IP);
		COPY_FIELD(LOC_HOST, loc_host, DST_IP);
		COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
		COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
		COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
		COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
		COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
		COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
		COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
		COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
#undef COPY_FIELD
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
			       match_fields);
	}

	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
		       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
	if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
			       spec->rss_context !=
			       EFX_FILTER_RSS_CONTEXT_DEFAULT ?
			       spec->rss_context : nic_data->rx_rss_context);
}

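/* For illustration, COPY_FIELD(LOC_PORT, loc_port, DST_PORT) above
 * expands (ignoring the BUILD_BUG_ON) to roughly:
 *
 *	if (spec->match_flags & EFX_FILTER_MATCH_LOC_PORT) {
 *		match_fields |= 1 << MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN;
 *		memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_PORT),
 *		       &spec->loc_port, sizeof(spec->loc_port));
 *	}
 *
 * i.e. each driver match flag sets the corresponding MCDI match bit and
 * copies the (already network-byte-order) value into the request.
 */
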
static int efx_ef10_filter_push(struct efx_nic *efx,
				const struct efx_filter_spec *spec,
				u64 *handle, bool replacing)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
	int rc;

	efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc == 0)
		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
	return rc;
}

static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
					enum efx_filter_match_flags match_flags)
{
	unsigned int match_pri;

	for (match_pri = 0;
	     match_pri < table->rx_match_count;
	     match_pri++)
		if (table->rx_match_flags[match_pri] == match_flags)
			return match_pri;

	return -EPROTONOSUPPORT;
}

static s32 efx_ef10_filter_insert(struct efx_nic *efx,
				  struct efx_filter_spec *spec,
				  bool replace_equal)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
	struct efx_filter_spec *saved_spec;
	unsigned int match_pri, hash;
	unsigned int priv_flags;
	bool replacing = false;
	DEFINE_WAIT(wait);
	bool is_mc_recip;
	s32 rc, ins_index = -1;

	/* For now, only support RX filters */
	if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
	    EFX_FILTER_FLAG_RX)
		return -EINVAL;

	rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
	if (rc < 0)
		return rc;
	match_pri = rc;

	hash = efx_ef10_filter_hash(spec);
	is_mc_recip = efx_filter_is_mc_recipient(spec);
	if (is_mc_recip)
		bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);

	/* Find any existing filters with the same match tuple or
	 * else a free slot to insert at.  If any of them are busy,
	 * we have to wait and retry.
	 */
	for (;;) {
		unsigned int depth = 1;
		unsigned int i;

		spin_lock_bh(&efx->filter_lock);

		for (;;) {
			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			saved_spec = efx_ef10_filter_entry_spec(table, i);

			if (!saved_spec) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_ef10_filter_equal(spec, saved_spec)) {
				if (table->entry[i].spec &
				    EFX_EF10_FILTER_FLAG_BUSY)
					break;
				if (spec->priority < saved_spec->priority &&
				    !(saved_spec->priority ==
				      EFX_FILTER_PRI_REQUIRED &&
				      saved_spec->flags &
				      EFX_FILTER_FLAG_RX_STACK)) {
					rc = -EPERM;
					goto out_unlock;
				}
				if (!is_mc_recip) {
					/* This is the only one */
					if (spec->priority ==
					    saved_spec->priority &&
					    !replace_equal) {
						rc = -EEXIST;
						goto out_unlock;
					}
					ins_index = i;
					goto found;
				} else if (spec->priority >
					   saved_spec->priority ||
					   (spec->priority ==
					    saved_spec->priority &&
					    replace_equal)) {
					if (ins_index < 0)
						ins_index = i;
					else
						__set_bit(depth, mc_rem_map);
				}
			}

			/* Once we reach the maximum search depth, use
			 * the first suitable slot or return -EBUSY if
			 * there was none
			 */
			if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out_unlock;
				}
				goto found;
			}

			++depth;
		}

		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&efx->filter_lock);
		schedule();
	}

found:
	/* Create a software table entry if necessary, and mark it
	 * busy.  We might yet fail to insert, but any attempt to
	 * insert a conflicting filter while we're waiting for the
	 * firmware must find the busy entry.
	 */
	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (saved_spec) {
		if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
			/* Just make sure it won't be removed */
			saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
			table->entry[ins_index].spec &=
				~EFX_EF10_FILTER_FLAG_STACK_OLD;
			rc = ins_index;
			goto out_unlock;
		}
		replacing = true;
		priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
	} else {
		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
		if (!saved_spec) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		*saved_spec = *spec;
		priv_flags = 0;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
				  priv_flags | EFX_EF10_FILTER_FLAG_BUSY);

	/* Mark lower-priority multicast recipients busy prior to removal */
	if (is_mc_recip) {
		unsigned int depth, i;

		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			if (test_bit(depth, mc_rem_map))
				table->entry[i].spec |=
					EFX_EF10_FILTER_FLAG_BUSY;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
				  replacing);

	/* Finalise the software table entry */
	spin_lock_bh(&efx->filter_lock);
	if (rc == 0) {
		if (replacing) {
			/* Update the fields that may differ */
			saved_spec->priority = spec->priority;
			saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
			saved_spec->flags |= spec->flags;
			saved_spec->rss_context = spec->rss_context;
			saved_spec->dmaq_id = spec->dmaq_id;
		}
	} else if (!replacing) {
		kfree(saved_spec);
		saved_spec = NULL;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);

	/* Remove and finalise entries for lower-priority multicast
	 * recipients
	 */
	if (is_mc_recip) {
		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
		unsigned int depth, i;

		memset(inbuf, 0, sizeof(inbuf));

		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
			if (!test_bit(depth, mc_rem_map))
				continue;

			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			saved_spec = efx_ef10_filter_entry_spec(table, i);
			priv_flags = efx_ef10_filter_entry_flags(table, i);

			if (rc == 0) {
				spin_unlock_bh(&efx->filter_lock);
				MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
					       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
				MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
					       table->entry[i].handle);
				rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
						  inbuf, sizeof(inbuf),
						  NULL, 0, NULL);
				spin_lock_bh(&efx->filter_lock);
			}

			if (rc == 0) {
				kfree(saved_spec);
				saved_spec = NULL;
				priv_flags = 0;
			} else {
				priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
			}
			efx_ef10_filter_set_entry(table, i, saved_spec,
						  priv_flags);
		}
	}

	/* If successful, return the inserted filter ID */
	if (rc == 0)
		rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;

	wake_up_all(&table->waitq);
out_unlock:
	spin_unlock_bh(&efx->filter_lock);
	finish_wait(&table->waitq, &wait);
	return rc;
}

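/* Filter ID encoding, as produced by the insert path above and decoded
 * by the remove/get paths below: id = match_pri * HUNT_FILTER_TBL_ROWS +
 * ins_index.  For example, with 8192 rows, a filter at table index 42
 * whose match type has priority 3 gets ID 3 * 8192 + 42 = 24618;
 * id % HUNT_FILTER_TBL_ROWS recovers the index and
 * id / HUNT_FILTER_TBL_ROWS the match priority, which is how
 * user-supplied IDs are validated.
 */
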
static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
{
	/* no need to do anything here on EF10 */
}

/* Remove a filter.
 * If !stack_requested, remove by ID
 * If stack_requested, remove by index
 * Filter ID may come from userland and must be range-checked.
 */
static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
					   enum efx_filter_priority priority,
					   u32 filter_id, bool stack_requested)
{
	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
	struct efx_filter_spec *spec;
	DEFINE_WAIT(wait);
	int rc;

	/* Find the software table entry and mark it busy.  Don't
	 * remove it yet; any attempt to update while we're waiting
	 * for the firmware must find the busy entry.
	 */
	for (;;) {
		spin_lock_bh(&efx->filter_lock);
		if (!(table->entry[filter_idx].spec &
		      EFX_EF10_FILTER_FLAG_BUSY))
			break;
		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&efx->filter_lock);
		schedule();
	}
	spec = efx_ef10_filter_entry_spec(table, filter_idx);
	if (!spec || spec->priority > priority ||
	    (!stack_requested &&
	     efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
	     filter_id / HUNT_FILTER_TBL_ROWS)) {
		rc = -ENOENT;
		goto out_unlock;
	}
	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
	spin_unlock_bh(&efx->filter_lock);

	if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
		/* Reset steering of a stack-owned filter */

		struct efx_filter_spec new_spec = *spec;

		new_spec.priority = EFX_FILTER_PRI_REQUIRED;
		new_spec.flags = (EFX_FILTER_FLAG_RX |
				  EFX_FILTER_FLAG_RX_RSS |
				  EFX_FILTER_FLAG_RX_STACK);
		new_spec.dmaq_id = 0;
		new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
		rc = efx_ef10_filter_push(efx, &new_spec,
					  &table->entry[filter_idx].handle,
					  true);

		spin_lock_bh(&efx->filter_lock);
		if (rc == 0)
			*spec = new_spec;
	} else {
		/* Really remove the filter */

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
			       table->entry[filter_idx].handle);
		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
				  inbuf, sizeof(inbuf), NULL, 0, NULL);

		spin_lock_bh(&efx->filter_lock);
		if (rc == 0) {
			kfree(spec);
			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
		}
	}
	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
	wake_up_all(&table->waitq);
out_unlock:
	spin_unlock_bh(&efx->filter_lock);
	finish_wait(&table->waitq, &wait);
	return rc;
}

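/* Concurrency note: the BUSY flag acts as a per-entry lock spanning the
 * firmware round trip.  Both the insert and remove paths spin on it with
 * prepare_to_wait()/schedule() rather than holding filter_lock across
 * the MCDI call, so the spinlock only ever protects the software table
 * itself.
 */
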
static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
				       enum efx_filter_priority priority,
				       u32 filter_id)
{
	return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
}

static int efx_ef10_filter_get_safe(struct efx_nic *efx,
				    enum efx_filter_priority priority,
				    u32 filter_id, struct efx_filter_spec *spec)
{
	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
	struct efx_ef10_filter_table *table = efx->filter_state;
	const struct efx_filter_spec *saved_spec;
	int rc;

	spin_lock_bh(&efx->filter_lock);
	saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
	if (saved_spec && saved_spec->priority == priority &&
	    efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
	    filter_id / HUNT_FILTER_TBL_ROWS) {
		*spec = *saved_spec;
		rc = 0;
	} else {
		rc = -ENOENT;
	}
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}

static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
				     enum efx_filter_priority priority)
{
	/* TODO */
}

static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
					 enum efx_filter_priority priority)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	unsigned int filter_idx;
	u32 count = 0;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		if (table->entry[filter_idx].spec &&
		    efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
		    priority)
			++count;
	}
	spin_unlock_bh(&efx->filter_lock);
	return count;
}

static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;

	return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
}

static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
				      enum efx_filter_priority priority,
				      u32 *buf, u32 size)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (spec && spec->priority == priority) {
			if (count == size) {
				count = -EMSGSIZE;
				break;
			}
			buf[count++] = (efx_ef10_filter_rx_match_pri(
						table, spec->match_flags) *
					HUNT_FILTER_TBL_ROWS +
					filter_idx);
		}
	}
	spin_unlock_bh(&efx->filter_lock);
	return count;
}

#ifdef CONFIG_RFS_ACCEL

static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;

static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
				      struct efx_filter_spec *spec)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	struct efx_filter_spec *saved_spec;
	unsigned int hash, i, depth = 1;
	bool replacing = false;
	int ins_index = -1;
	u64 cookie;
	s32 rc;

	/* Must be an RX filter without RSS and not for a multicast
	 * destination address (RFS only works for connected sockets).
	 * These restrictions allow us to pass only a tiny amount of
	 * data through to the completion function.
	 */
	EFX_WARN_ON_PARANOID(spec->flags !=
			     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
	EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
	EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));

	hash = efx_ef10_filter_hash(spec);

	spin_lock_bh(&efx->filter_lock);

	/* Find any existing filter with the same match tuple or else
	 * a free slot to insert at.  If an existing filter is busy,
	 * we have to give up.
	 */
	for (;;) {
		i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
		saved_spec = efx_ef10_filter_entry_spec(table, i);

		if (!saved_spec) {
			if (ins_index < 0)
				ins_index = i;
		} else if (efx_ef10_filter_equal(spec, saved_spec)) {
			if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
				rc = -EBUSY;
				goto fail_unlock;
			}
			EFX_WARN_ON_PARANOID(saved_spec->flags &
					     EFX_FILTER_FLAG_RX_STACK);
			if (spec->priority < saved_spec->priority) {
				rc = -EPERM;
				goto fail_unlock;
			}
			ins_index = i;
			break;
		}

		/* Once we reach the maximum search depth, use the
		 * first suitable slot or return -EBUSY if there was
		 * none
		 */
		if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
			if (ins_index < 0) {
				rc = -EBUSY;
				goto fail_unlock;
			}
			break;
		}

		++depth;
	}

	/* Create a software table entry if necessary, and mark it
	 * busy.  We might yet fail to insert, but any attempt to
	 * insert a conflicting filter while we're waiting for the
	 * firmware must find the busy entry.
	 */
	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (saved_spec) {
		replacing = true;
	} else {
		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
		if (!saved_spec) {
			rc = -ENOMEM;
			goto fail_unlock;
		}
		*saved_spec = *spec;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
				  EFX_EF10_FILTER_FLAG_BUSY);

	spin_unlock_bh(&efx->filter_lock);

	/* Pack up the variables needed on completion */
	cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;

	efx_ef10_filter_push_prep(efx, spec, inbuf,
				  table->entry[ins_index].handle, replacing);
	efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			   MC_CMD_FILTER_OP_OUT_LEN,
			   efx_ef10_filter_rfs_insert_complete, cookie);

	return ins_index;

fail_unlock:
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}

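/* Cookie layout used above (unpacked in the completion below): bit 31 is
 * the replacing flag, bits 30:16 the software table index and bits 15:0
 * the RX queue.  E.g. replacing=1, ins_index=0x123, dmaq_id=5 packs to
 * 0x81230005.  This fits because HUNT_FILTER_TBL_ROWS (8192) needs only
 * 13 bits, and the restrictions asserted at the top of the function mean
 * no other state has to be carried.
 */
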
static void
efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
				    int rc, efx_dword_t *outbuf,
				    size_t outlen_actual)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	unsigned int ins_index, dmaq_id;
	struct efx_filter_spec *spec;
	bool replacing;

	/* Unpack the cookie */
	replacing = cookie >> 31;
	ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
	dmaq_id = cookie & 0xffff;

	spin_lock_bh(&efx->filter_lock);
	spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (rc == 0) {
		table->entry[ins_index].handle =
			MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
		if (replacing)
			spec->dmaq_id = dmaq_id;
	} else if (!replacing) {
		kfree(spec);
		spec = NULL;
	}
	efx_ef10_filter_set_entry(table, ins_index, spec, 0);
	spin_unlock_bh(&efx->filter_lock);

	wake_up_all(&table->waitq);
}

static void
efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
				    unsigned long filter_idx,
				    int rc, efx_dword_t *outbuf,
				    size_t outlen_actual);

static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
					   unsigned int filter_idx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_filter_spec *spec =
		efx_ef10_filter_entry_spec(table, filter_idx);
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);

	if (!spec ||
	    (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
	    spec->priority != EFX_FILTER_PRI_HINT ||
	    !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
				 flow_id, filter_idx))
		return false;

	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
		       MC_CMD_FILTER_OP_IN_OP_REMOVE);
	MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
		       table->entry[filter_idx].handle);
	if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
			       efx_ef10_filter_rfs_expire_complete, filter_idx))
		return false;

	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
	return true;
}

static void
efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
				    unsigned long filter_idx,
				    int rc, efx_dword_t *outbuf,
				    size_t outlen_actual)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_filter_spec *spec =
		efx_ef10_filter_entry_spec(table, filter_idx);

	spin_lock_bh(&efx->filter_lock);
	if (rc == 0) {
		kfree(spec);
		efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
	}
	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
	wake_up_all(&table->waitq);
	spin_unlock_bh(&efx->filter_lock);
}

#endif /* CONFIG_RFS_ACCEL */

static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
{
	int match_flags = 0;

#define MAP_FLAG(gen_flag, mcdi_field) { \
		u32 old_mcdi_flags = mcdi_flags; \
		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
				mcdi_field ## _LBN); \
		if (mcdi_flags != old_mcdi_flags) \
			match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
	}
	MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
	MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
	MAP_FLAG(REM_HOST, SRC_IP);
	MAP_FLAG(LOC_HOST, DST_IP);
	MAP_FLAG(REM_MAC, SRC_MAC);
	MAP_FLAG(REM_PORT, SRC_PORT);
	MAP_FLAG(LOC_MAC, DST_MAC);
	MAP_FLAG(LOC_PORT, DST_PORT);
	MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
	MAP_FLAG(INNER_VID, INNER_VLAN);
	MAP_FLAG(OUTER_VID, OUTER_VLAN);
	MAP_FLAG(IP_PROTO, IP_PROTO);
#undef MAP_FLAG

	/* Did we map them all? */
	if (mcdi_flags)
		return -EINVAL;

	return match_flags;
}

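/* Example: an MCDI match set of {SRC_IP, DST_IP, ETHER_TYPE} has all
 * three bits cleared by the MAP_FLAG() invocations above, yielding
 * EFX_FILTER_MATCH_REM_HOST | LOC_HOST | ETHER_TYPE with mcdi_flags
 * ending up zero.  Any bit still set afterwards is a match type this
 * driver does not know about, hence the -EINVAL.
 */
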
static int efx_ef10_filter_table_probe(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
	unsigned int pd_match_pri, pd_match_count;
	struct efx_ef10_filter_table *table;
	size_t outlen;
	int rc;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Find out which RX filter types are supported, and their priorities */
	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc)
		goto fail;
	pd_match_count = MCDI_VAR_ARRAY_LEN(
		outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
	table->rx_match_count = 0;

	for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
		u32 mcdi_flags =
			MCDI_ARRAY_DWORD(
				outbuf,
				GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
				pd_match_pri);
		rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
		if (rc < 0) {
			netif_dbg(efx, probe, efx->net_dev,
				  "%s: fw flags %#x pri %u not supported in driver\n",
				  __func__, mcdi_flags, pd_match_pri);
		} else {
			netif_dbg(efx, probe, efx->net_dev,
				  "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
				  __func__, mcdi_flags, pd_match_pri,
				  rc, table->rx_match_count);
			table->rx_match_flags[table->rx_match_count++] = rc;
		}
	}

	table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
	if (!table->entry) {
		rc = -ENOMEM;
		goto fail;
	}

	efx->filter_state = table;
	init_waitqueue_head(&table->waitq);
	return 0;

fail:
	kfree(table);
	return rc;
}

static void efx_ef10_filter_table_restore(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	bool failed = false;
	int rc;

	if (!nic_data->must_restore_filters)
		return;

	spin_lock_bh(&efx->filter_lock);

	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
		spin_unlock_bh(&efx->filter_lock);

		rc = efx_ef10_filter_push(efx, spec,
					  &table->entry[filter_idx].handle,
					  false);
		if (rc)
			failed = true;

		spin_lock_bh(&efx->filter_lock);
		if (rc) {
			kfree(spec);
			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
		} else {
			table->entry[filter_idx].spec &=
				~EFX_EF10_FILTER_FLAG_BUSY;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	if (failed)
		netif_err(efx, hw, efx->net_dev,
			  "unable to restore all filters\n");
	else
		nic_data->must_restore_filters = false;
}

static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
			       table->entry[filter_idx].handle);
		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc != 0);
		kfree(spec);
	}

	vfree(table->entry);
	kfree(table);
}

static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct efx_filter_spec spec;
	bool remove_failed = false;
	struct netdev_hw_addr *uc;
	struct netdev_hw_addr *mc;
	unsigned int filter_idx;
	int i, n, rc;

	if (!efx_dev_registered(efx))
		return;

	/* Mark old filters that may need to be removed */
	spin_lock_bh(&efx->filter_lock);
	n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
	for (i = 0; i < n; i++) {
		filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
	}
	n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
	for (i = 0; i < n; i++) {
		filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
	}
	spin_unlock_bh(&efx->filter_lock);

	/* Copy/convert the address lists; add the primary station
	 * address and broadcast address
	 */
	netif_addr_lock_bh(net_dev);
	if (net_dev->flags & IFF_PROMISC ||
	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
		table->stack_uc_count = -1;
	} else {
		table->stack_uc_count = 1 + netdev_uc_count(net_dev);
		memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
		       ETH_ALEN);
		i = 1;
		netdev_for_each_uc_addr(uc, net_dev) {
			memcpy(table->stack_uc_list[i].addr,
			       uc->addr, ETH_ALEN);
			i++;
		}
	}
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
		table->stack_mc_count = -1;
	} else {
		table->stack_mc_count = 1 + netdev_mc_count(net_dev);
		eth_broadcast_addr(table->stack_mc_list[0].addr);
		i = 1;
		netdev_for_each_mc_addr(mc, net_dev) {
			memcpy(table->stack_mc_list[i].addr,
			       mc->addr, ETH_ALEN);
			i++;
		}
	}
	netif_addr_unlock_bh(net_dev);

	/* Insert/renew unicast filters */
	if (table->stack_uc_count >= 0) {
		for (i = 0; i < table->stack_uc_count; i++) {
			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
					   EFX_FILTER_FLAG_RX_RSS |
					   EFX_FILTER_FLAG_RX_STACK,
					   0);
			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
						 table->stack_uc_list[i].addr);
			rc = efx_ef10_filter_insert(efx, &spec, true);
			if (rc < 0) {
				/* Fall back to unicast-promisc */
				while (i--)
					efx_ef10_filter_remove_safe(
						efx, EFX_FILTER_PRI_REQUIRED,
						table->stack_uc_list[i].id);
				table->stack_uc_count = -1;
				break;
			}
			table->stack_uc_list[i].id = rc;
		}
	}
	if (table->stack_uc_count < 0) {
		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
				   EFX_FILTER_FLAG_RX_RSS |
				   EFX_FILTER_FLAG_RX_STACK,
				   0);
		efx_filter_set_uc_def(&spec);
		rc = efx_ef10_filter_insert(efx, &spec, true);
		if (rc < 0) {
			WARN_ON(1);
			table->stack_uc_count = 0;
		} else {
			table->stack_uc_list[0].id = rc;
		}
	}

	/* Insert/renew multicast filters */
	if (table->stack_mc_count >= 0) {
		for (i = 0; i < table->stack_mc_count; i++) {
			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
					   EFX_FILTER_FLAG_RX_RSS |
					   EFX_FILTER_FLAG_RX_STACK,
					   0);
			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
						 table->stack_mc_list[i].addr);
			rc = efx_ef10_filter_insert(efx, &spec, true);
			if (rc < 0) {
				/* Fall back to multicast-promisc */
				while (i--)
					efx_ef10_filter_remove_safe(
						efx, EFX_FILTER_PRI_REQUIRED,
						table->stack_mc_list[i].id);
				table->stack_mc_count = -1;
				break;
			}
			table->stack_mc_list[i].id = rc;
		}
	}
	if (table->stack_mc_count < 0) {
		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
				   EFX_FILTER_FLAG_RX_RSS |
				   EFX_FILTER_FLAG_RX_STACK,
				   0);
		efx_filter_set_mc_def(&spec);
		rc = efx_ef10_filter_insert(efx, &spec, true);
		if (rc < 0) {
			WARN_ON(1);
			table->stack_mc_count = 0;
		} else {
			table->stack_mc_list[0].id = rc;
		}
	}

	/* Remove filters that weren't renewed.  Since nothing else
	 * changes the STACK_OLD flag or removes these filters, we
	 * don't need to hold the filter_lock while scanning for
	 * these filters.
	 */
	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
		if (ACCESS_ONCE(table->entry[i].spec) &
		    EFX_EF10_FILTER_FLAG_STACK_OLD) {
			if (efx_ef10_filter_remove_internal(efx,
					EFX_FILTER_PRI_REQUIRED,
					i, true) < 0)
				remove_failed = true;
		}
	}
	WARN_ON(remove_failed);
}

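/* The STACK_OLD handling above is a mark-and-sweep: every filter
 * currently on the stack's lists is marked old, re-inserting an address
 * that is still wanted clears the mark again (see
 * efx_ef10_filter_insert()), and the final scan sweeps away whatever
 * stayed marked, i.e. addresses that have disappeared from the
 * net_device lists.
 */
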
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
	efx_ef10_filter_sync_rx_mode(efx);

	return efx_mcdi_set_mac(efx);
}

static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);

	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
	return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

/* MC BISTs follow a different poll mechanism to phy BISTs.
 * The BIST is done in the poll handler on the MC, and the MCDI command
 * will block until the BIST is done.
 */
static int efx_ef10_poll_bist(struct efx_nic *efx)
{
	int rc;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
	size_t outlen;
	u32 result;

	rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
		return -EIO;

	result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
	switch (result) {
	case MC_CMD_POLL_BIST_PASSED:
		netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
		return 0;
	case MC_CMD_POLL_BIST_TIMEOUT:
		netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
		return -EIO;
	case MC_CMD_POLL_BIST_FAILED:
		netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
		return -EIO;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "BIST returned unknown result %u", result);
		return -EIO;
	}
}

static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);

	rc = efx_ef10_start_bist(efx, bist_type);
	if (rc != 0)
		return rc;

	return efx_ef10_poll_bist(efx);
}

static int
efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc, rc2;

	efx_reset_down(efx, RESET_TYPE_WORLD);

	rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
			  NULL, 0, NULL, 0, NULL);
	if (rc != 0)
		goto out;

	tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
	tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;

	rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);

out:
	rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
	return rc ? rc : rc2;
}

#ifdef CONFIG_SFC_MTD

struct efx_ef10_nvram_type_info {
	u16 type, type_mask;
	u8 port;
	const char *name;
};

static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
	{ NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
	{ NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
	{ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
};

static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
					struct efx_mcdi_mtd_partition *part,
					unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
	const struct efx_ef10_nvram_type_info *info;
	size_t size, erase_size, outlen;
	bool protected;
	int rc;

	for (info = efx_ef10_nvram_types; ; info++) {
		if (info ==
		    efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
			return -ENODEV;
		if ((type & ~info->type_mask) == info->type)
			break;
	}
	if (info->port != efx_port_num(efx))
		return -ENODEV;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
	if (rc)
		return rc;
	if (protected)
		return -ENODEV; /* hide it */

	part->nvram_type = type;

	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
		return -EIO;
	if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
	    (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
		part->fw_subtype = MCDI_DWORD(outbuf,
					      NVRAM_METADATA_OUT_SUBTYPE);

	part->common.dev_type_name = "EF10 NVRAM manager";
	part->common.type_name = info->name;

	part->common.mtd.type = MTD_NORFLASH;
	part->common.mtd.flags = MTD_CAP_NORFLASH;
	part->common.mtd.size = size;
	part->common.mtd.erasesize = erase_size;

	return 0;
}

static int efx_ef10_mtd_probe(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
	struct efx_mcdi_mtd_partition *parts;
	size_t outlen, n_parts_total, i, n_parts;
	unsigned int type;
	int rc;

	ASSERT_RTNL();

	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
		return -EIO;

	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
	if (n_parts_total >
	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
		return -EIO;

	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	n_parts = 0;
	for (i = 0; i < n_parts_total; i++) {
		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
					i);
		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
		if (rc == 0)
			n_parts++;
		else if (rc != -ENODEV)
			goto fail;
	}

	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
	if (rc)
		kfree(parts);
	return rc;
}

#endif /* CONFIG_SFC_MTD */

static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
{
	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}

const struct efx_nic_type efx_hunt_a0_nic_type = {
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_mcdi_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_mcdi_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats,
	.start_stats = efx_mcdi_mac_start_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol,
	.set_wol = efx_ef10_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_chip = efx_ef10_test_chip,
	.test_nvram = efx_mcdi_nvram_test_all,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.rx_push_indir_table = efx_ef10_rx_push_indir_table,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_ef10_mtd_probe,
	.mtd_rename = efx_mcdi_mtd_rename,
	.mtd_read = efx_mcdi_mtd_read,
	.mtd_erase = efx_mcdi_mtd_erase,
	.mtd_write = efx_mcdi_mtd_write,
	.mtd_sync = efx_mcdi_mtd_sync,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time,

	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
};