1 /* QLogic qede NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/compiler.h>
35 #include <linux/version.h>
36 #include <linux/workqueue.h>
37 #include <linux/netdevice.h>
38 #include <linux/interrupt.h>
39 #include <linux/bitmap.h>
40 #include <linux/kernel.h>
41 #include <linux/mutex.h>
42 #include <linux/bpf.h>
44 #include <linux/qed/common_hsi.h>
45 #include <linux/qed/eth_common.h>
46 #include <linux/qed/qed_if.h>
47 #include <linux/qed/qed_chain.h>
48 #include <linux/qed/qed_eth_if.h>
/* Driver version, rendered as "major.minor.revision.engineering". */
50 #define QEDE_MAJOR_VERSION 8
51 #define QEDE_MINOR_VERSION 10
52 #define QEDE_REVISION_VERSION 9
53 #define QEDE_ENGINEERING_VERSION 20
54 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
55 __stringify(QEDE_MINOR_VERSION) "." \
56 __stringify(QEDE_REVISION_VERSION) "." \
57 __stringify(QEDE_ENGINEERING_VERSION)
59 #define DRV_MODULE_SYM qede
63 u64 packet_too_big_discard;
71 u64 mftag_filter_discards;
72 u64 mac_filter_discards;
82 u64 coalesced_aborts_num;
83 u64 non_coalesced_pkts;
87 u64 rx_64_byte_packets;
88 u64 rx_65_to_127_byte_packets;
89 u64 rx_128_to_255_byte_packets;
90 u64 rx_256_to_511_byte_packets;
91 u64 rx_512_to_1023_byte_packets;
92 u64 rx_1024_to_1518_byte_packets;
93 u64 rx_1519_to_1522_byte_packets;
94 u64 rx_1519_to_2047_byte_packets;
95 u64 rx_2048_to_4095_byte_packets;
96 u64 rx_4096_to_9216_byte_packets;
97 u64 rx_9217_to_16383_byte_packets;
99 u64 rx_mac_crtl_frames;
103 u64 rx_carrier_errors;
104 u64 rx_oversize_packets;
106 u64 rx_undersize_packets;
108 u64 tx_64_byte_packets;
109 u64 tx_65_to_127_byte_packets;
110 u64 tx_128_to_255_byte_packets;
111 u64 tx_256_to_511_byte_packets;
112 u64 tx_512_to_1023_byte_packets;
113 u64 tx_1024_to_1518_byte_packets;
114 u64 tx_1519_to_2047_byte_packets;
115 u64 tx_2048_to_4095_byte_packets;
116 u64 tx_4096_to_9216_byte_packets;
117 u64 tx_9217_to_16383_byte_packets;
120 u64 tx_lpi_entry_count;
121 u64 tx_total_collisions;
124 u64 tx_mac_ctrl_frames;
128 struct list_head list;
133 struct qede_rdma_dev {
134 struct qedr_dev *qedr_dev;
135 struct list_head entry;
136 struct list_head roce_event_list;
137 struct workqueue_struct *roce_wq;
141 struct qed_dev *cdev;
142 struct net_device *ndev;
143 struct pci_dev *pdev;
149 #define QEDE_FLAG_IS_VF BIT(0)
150 #define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
152 const struct qed_eth_ops *ops;
154 struct qed_dev_eth_info dev_info;
155 #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
156 #define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
158 struct qede_fastpath *fp_array;
165 #define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
166 #define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
167 #define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
169 struct qed_int_info int_info;
170 unsigned char primary_mac[ETH_ALEN];
172 /* Smaller private variant of the RTNL lock */
173 struct mutex qede_lock;
174 u32 state; /* Protected by qede_lock */
178 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
179 #define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
180 /* Max supported alignment is 256 (8 shift)
181 * minimal alignment shift 6 is optimal for 57xxx HW performance
183 #define QEDE_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT))
184 /* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
185 * at the end of skb->data, to avoid wasting a full cache line.
186 * This reduces memory use (skb->truesize).
188 #define QEDE_FW_RX_ALIGN_END \
189 max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT, \
190 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
192 struct qede_stats stats;
193 #define QEDE_RSS_INDIR_INITED BIT(0)
194 #define QEDE_RSS_KEY_INITED BIT(1)
195 #define QEDE_RSS_CAPS_INITED BIT(2)
196 u32 rss_params_inited; /* bit-field to track initialized rss params */
197 struct qed_update_vport_rss_params rss_params;
198 u16 q_num_rx_buffers; /* Must be a power of two */
199 u16 q_num_tx_buffers; /* Must be a power of two */
202 struct list_head vlan_list;
203 u16 configured_vlans;
204 u16 non_configured_vlans;
205 bool accept_any_vlan;
206 struct delayed_work sp_task;
207 unsigned long sp_flags;
213 struct qede_rdma_dev rdma_info;
215 struct bpf_prog *xdp_prog;
/* Recombine a 64-bit counter kept as two 32-bit halves: (hi << 32) + lo. */
223 #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
226 #define MAX_NUM_PRI 8
228 /* The driver supports the new build_skb() API:
229 * RX ring buffer contains pointer to kmalloc() data only,
230 * skb are built only after the frame was DMA-ed.
235 unsigned int page_offset;
238 enum qede_agg_state {
239 QEDE_AGG_STATE_NONE = 0,
240 QEDE_AGG_STATE_START = 1,
241 QEDE_AGG_STATE_ERROR = 2
244 struct qede_agg_info {
245 /* rx_buf is a data buffer that can be placed / consumed from rx bd
246 * chain. It has two purposes: We will preallocate the data buffer
247 * for each aggregation when we open the interface and will place this
248 * buffer on the rx-bd-ring when we receive TPA_START. We don't want
249 * to be in a state where allocation fails, as we can't reuse the
250 * consumer buffer in the rx-chain since FW may still be writing to it
251 * (since header needs to be modified for TPA).
252 * The second purpose is to keep a pointer to the bd buffer during
255 struct sw_rx_data buffer;
256 dma_addr_t buffer_mapping;
260 /* We need some structs from the start cookie until termination */
262 u16 start_cqe_bd_len;
263 u8 start_cqe_placement_offset;
271 struct qede_rx_queue {
273 void __iomem *hw_rxq_prod_addr;
275 /* Required for the allocation of replacement buffers */
278 struct bpf_prog *xdp_prog;
283 u16 num_rx_buffers; /* Slowpath */
292 struct sw_rx_data *sw_rx_ring;
293 struct qed_chain rx_bd_ring;
294 struct qed_chain rx_comp_ring ____cacheline_aligned;
297 struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
309 struct eth_db_data data;
316 /* Set on the first BD descriptor when there is a split BD */
317 #define QEDE_TSO_SPLIT_BD BIT(0)
320 struct qede_tx_queue {
325 u16 num_tx_buffers; /* Slowpath only */
332 /* Needed for the mapping of packets */
335 void __iomem *doorbell_addr;
337 int index; /* Slowpath only */
338 #define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
339 QEDE_MAX_TSS_CNT(edev))
340 #define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
342 /* Regular Tx requires skb + metadata for release purpose,
343 * while XDP requires only the pages themselves.
346 struct sw_tx_bd *skbs;
350 struct qed_chain tx_pbl;
352 /* Slowpath; Should be kept in end [unless missing padding] */
/* Read back the 64-bit DMA address stored in a BD as two little-endian
 * 32-bit words (used when unmapping the buffer on completion).
 */
356 #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
357 le32_to_cpu((bd)->addr.lo))
358 #define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
360 (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr)); \
361 (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr)); \
362 (bd)->nbytes = cpu_to_le16(len); \
/* Byte length recorded in a BD, converted from little-endian for unmap. */
364 #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
366 struct qede_fastpath {
367 struct qede_dev *edev;
368 #define QEDE_FASTPATH_TX BIT(0)
369 #define QEDE_FASTPATH_RX BIT(1)
370 #define QEDE_FASTPATH_XDP BIT(2)
371 #define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
375 struct napi_struct napi;
376 struct qed_sb_info *sb_info;
377 struct qede_rx_queue *rxq;
378 struct qede_tx_queue *txq;
379 struct qede_tx_queue *xdp_tx;
381 #define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
382 char name[VEC_NAME_SIZE];
385 /* Debug print definitions */
386 #define DP_NAME(edev) ((edev)->ndev->name)
389 #define XMIT_L4_CSUM BIT(0)
390 #define XMIT_LSO BIT(1)
391 #define XMIT_ENC BIT(2)
392 #define XMIT_ENC_GSO_L4_CSUM BIT(3)
394 #define QEDE_CSUM_ERROR BIT(0)
395 #define QEDE_CSUM_UNNECESSARY BIT(1)
396 #define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
398 #define QEDE_SP_RX_MODE 1
399 #define QEDE_SP_VXLAN_PORT_CONFIG 2
400 #define QEDE_SP_GENEVE_PORT_CONFIG 3
402 struct qede_reload_args {
403 void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
405 netdev_features_t features;
406 struct bpf_prog *new_prog;
411 /* Datapath functions definition */
412 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
413 netdev_features_t qede_features_check(struct sk_buff *skb,
414 struct net_device *dev,
415 netdev_features_t features);
416 void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
417 int qede_alloc_rx_buffer(struct qede_rx_queue *rxq);
418 int qede_free_tx_pkt(struct qede_dev *edev,
419 struct qede_tx_queue *txq, int *len);
420 int qede_poll(struct napi_struct *napi, int budget);
421 irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);
423 void qede_set_dcbnl_ops(struct net_device *ndev);
425 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
426 void qede_set_ethtool_ops(struct net_device *netdev);
427 void qede_reload(struct qede_dev *edev,
428 struct qede_reload_args *args, bool is_locked);
429 int qede_change_mtu(struct net_device *dev, int new_mtu);
430 void qede_fill_by_demand_stats(struct qede_dev *edev);
431 void __qede_lock(struct qede_dev *edev);
432 void __qede_unlock(struct qede_dev *edev);
433 bool qede_has_rx_work(struct qede_rx_queue *rxq);
434 int qede_txq_has_work(struct qede_tx_queue *txq);
435 void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
436 void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
438 #define RX_RING_SIZE_POW 13
439 #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
440 #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
441 #define NUM_RX_BDS_MIN 128
442 #define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
444 #define TX_RING_SIZE_POW 13
445 #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
446 #define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
447 #define NUM_TX_BDS_MIN 128
448 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
450 #define QEDE_MIN_PKT_LEN 64
451 #define QEDE_RX_HDR_SIZE 256
452 #define QEDE_MAX_JUMBO_PACKET_SIZE 9600
/* Iterate 'i' over all fastpath queues; relies on a local 'edev' being
 * in scope at the expansion site.
 */
453 #define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
455 #endif /* _QEDE_H_ */