/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */
#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>

#include "be_hw.h"
#define DRV_VER "4.0.100u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define OC_NAME_SH OC_NAME "(Skyhawk)"
#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df
#define BE_DEVICE_ID1 0x211
#define BE_DEVICE_ID2 0x221
#define OC_DEVICE_ID1 0x700 /* Device ID for BE2 cards */
#define OC_DEVICE_ID2 0x710 /* Device ID for BE3 cards */
#define OC_DEVICE_ID3 0xe220 /* Device ID for Lancer cards */
#define OC_DEVICE_ID4 0xe228 /* Device ID for VF in Lancer */
#define OC_DEVICE_ID5 0x720 /* Device ID for Skyhawk cards */
static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN ((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)

#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU 256
#define BE_NUM_VLANS_SUPPORTED 64

#define BE_MAX_TX_FRAG_COUNT 30

#define EVNT_Q_LEN 1024
#define TX_Q_LEN 2048
#define TX_CQ_LEN 1024
#define RX_Q_LEN 1024 /* Does not support any other value */
#define RX_CQ_LEN 1024
#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
#define MAX_TX_QS 8

#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1) /* RX + TX */
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
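/*
 * Worked out from the values above: RX_FRAGS_REFILL_WM = 1024 - 64 = 960.
 * Once the number of frags still posted to the RX ring drops below this
 * watermark, at least MAX_RX_POST slots are free, so a full batch of frags
 * can be replenished without overrunning RX_Q_LEN.
 */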
#define FW_VER_LEN 32
struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size; /* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used; /* Number of valid elements in the queue */
};
static inline u32 MODULO(u16 val, u16 limit)
{
	BUG_ON(limit & (limit - 1));
	return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	*index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}
static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void *queue_index_node(struct be_queue_info *q, u16 index)
{
	return q->dma_mem.va + index * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
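/*
 * Usage sketch (illustrative only, not part of the original header): a
 * producer writes an entry at the head and advances it, while "used"
 * tracks occupancy for the consumer. The helper name is hypothetical.
 */
static inline void example_queue_produce(struct be_queue_info *q,
					 const void *entry)
{
	memcpy(queue_head_node(q), entry, q->entry_size);
	queue_head_inc(q);	/* wraps via MODULO(head + 1, q->len) */
	atomic_inc(&q->used);
}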
struct be_eq_obj {
	struct be_queue_info q;

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u16 min_eqd; /* in usecs */
	u16 max_eqd; /* in usecs */
	u16 cur_eqd; /* in usecs */

	struct napi_struct napi;
};
struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	bool rearm_cq;
};

struct be_tx_stats {
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_reqs;
	u64 tx_wrbs;
	u64 tx_compl;
	ulong tx_jiffies;
	u32 tx_stops;
	struct u64_stats_sync sync;
	struct u64_stats_sync sync_compl;
};
struct be_tx_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
	struct be_tx_stats stats;
};
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	DEFINE_DMA_UNMAP_ADDR(bus);
	u16 page_offset;
	bool last_page_user;
};
struct be_rx_stats {
	u64 rx_bytes;
	u64 rx_pkts;
	u32 rx_drops_no_skbs; /* skb allocation errors */
	u32 rx_drops_no_frags; /* HW has no fetched frags */
	u32 rx_post_fail; /* page post alloc failures */
	u32 rx_polls; /* NAPI calls */
	u32 rx_events;
	u32 rx_compl;
	u32 rx_mcast_pkts;
	u32 rx_compl_err; /* completions with err set */
	u32 rx_pps; /* pkts per second */
	struct u64_stats_sync sync;
};
struct be_rx_compl_info {
	u32 rss_hash;
	u16 vlan_tag;
	u16 pkt_size;
	u16 rxq_idx;
	u16 port;
	u8 vlanf;
	u8 num_rcvd;
	u8 err;
	u8 ipf;
	u8 tcpf;
	u8 udpf;
	u8 ip_csum;
	u8 l4_csum;
	u8 ipv6;
	u8 vtm;
	u8 pkt_type;
};

struct be_rx_obj {
	struct be_adapter *adapter;
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_compl_info rxcp;
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
	struct be_eq_obj rx_eq;
	struct be_rx_stats stats;
	u8 rss_id;
	bool rx_post_starved; /* Zero rx frags have been posted to BE */
	u32 cache_line_barrier[16];
};
struct be_drv_stats {
	u32 be_on_die_temperature;
	u32 rx_drops_no_pbuf;
	u32 rx_drops_no_txpb;
	u32 rx_drops_no_erx_descr;
	u32 rx_drops_no_tpre_descr;
	u32 rx_drops_too_many_frags;
	u32 forwarded_packets;
	u32 rx_alignment_symbol_errors;
	u32 rx_priority_pause_frames;
	u32 rx_control_frames;
	u32 rx_in_range_errors;
	u32 rx_out_range_errors;
	u32 rx_frame_too_long;
	u32 rx_address_mismatch_drops;
	u32 rx_dropped_too_small;
	u32 rx_dropped_too_short;
	u32 rx_dropped_header_too_small;
	u32 rx_dropped_tcp_length;
	u32 rx_ip_checksum_errs;
	u32 rx_tcp_checksum_errs;
	u32 rx_udp_checksum_errs;
	u32 tx_priority_pauseframes;
	u32 tx_controlframes;
	u32 rxpp_fifo_overflow_drop;
	u32 rx_input_fifo_overflow_drop;
	u32 pmem_fifo_overflow_drop;
};

struct be_vf_cfg {
	unsigned char mac_addr[ETH_ALEN];
	int if_handle;
	int pmac_id;
	u16 vlan_tag;
	u32 tx_rate;
};
#define BE_FLAGS_LINK_STATUS_INIT 1

struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	u8 __iomem *db; /* Door Bell */
	struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated
	 * address is stored so it can be freed later.
	 */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;
	struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
	u32 num_msix_vec;

	/* TX rings */
	struct be_eq_obj tx_eq;
	struct be_tx_obj tx_obj[MAX_TX_QS];
	u8 num_tx_qs;

	u32 cache_line_break[8];

	/* RX rings */
	struct be_rx_obj rx_obj[MAX_RX_QS];
	u32 num_rx_qs;
	u32 big_page_size; /* Compounded page size shared by rx wrbs */
	struct be_drv_stats drv_stats;

	u16 max_vlans; /* Number of vlans supported */
	u8 vlan_tag[VLAN_N_VID];
	u8 vlan_prio_bmap; /* Available Priority BitMap */
	u16 recommended_prio; /* Recommended Priority */
	struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */

	struct be_dma_mem stats_cmd;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;
	/* Ethtool knobs and info */
	char fw_ver[FW_VER_LEN];
	int if_handle; /* Used to configure filtering */
	u32 pmac_id; /* MAC addr handle used by BE card */
	u32 beacon_state; /* for set_phys_id */
	u32 rx_fc; /* Rx flow control */
	u32 tx_fc; /* Tx flow control */

	u32 flags; /* BE_FLAGS_* */

	u8 generation; /* BladeEngine ASIC generation */

	struct completion flash_compl;

	u8 is_virtfn;
	u16 num_vfs;
	struct be_vf_cfg *vf_cfg;

	bool eeh_err;
	bool ue_detected;
	bool fw_timeout;
};
#define be_physfn(adapter) (!adapter->is_virtfn)
#define sriov_enabled(adapter) (adapter->num_vfs > 0)
#define for_all_vfs(adapter, vf_cfg, i) \
	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
		i++, vf_cfg++)
/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3

#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
			      (adapter->pdev->device == OC_DEVICE_ID4))
extern const struct ethtool_ops be_ethtool_ops;

#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
#define tx_stats(txo) (&txo->stats)
#define rx_stats(rxo) (&rxo->stats)

#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
#define for_all_rx_queues(adapter, rxo, i) \
	for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
		i++, rxo++)

/* Skip the first (default, non-RSS) queue */
#define for_all_rss_queues(adapter, rxo, i) \
	for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1); \
		i++, rxo++)

#define for_all_tx_queues(adapter, txo, i) \
	for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
		i++, txo++)
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
	((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
		(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr) \
	((size_t)(addr) & (PAGE_SIZE_4K - 1))
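/*
 * Worked example (illustrative): a 6000-byte buffer starting at address
 * 0x12345F00 begins 0xF00 (3840) bytes into its 4K page, so it spans
 * (3840 + 6000 + 4095) >> 12 = 3 pages, and OFFSET_IN_PAGE(0x12345F00)
 * evaluates to 0xF00.
 */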
/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field) \
	(((size_t)&(((_struct *)0)->field))%32)
/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}
static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *) ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}
#define AMAP_SET_BITS(_struct, field, ptr, val) \
		amap_set(ptr, \
			offsetof(_struct, field)/32, \
			amap_mask(sizeof(((_struct *)0)->field)), \
			AMAP_BIT_OFFSET(_struct, field), \
			val)
static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *) ptr;
	return mask & (*(dw + dw_offset) >> offset);
}
#define AMAP_GET_BITS(_struct, field, ptr) \
		amap_get(ptr, \
			offsetof(_struct, field)/32, \
			amap_mask(sizeof(((_struct *)0)->field)), \
			AMAP_BIT_OFFSET(_struct, field))
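/*
 * Usage sketch (hypothetical layout, not from the original header): AMAP
 * structs describe hardware bitfields one byte per bit, so sizeof() on a
 * field yields its width in bits.
 */
struct amap_example {
	u8 valid[1];		/* bit 0 */
	u8 ring_id[10];		/* bits 1..10 */
};

static inline u32 amap_example_roundtrip(void)
{
	u32 desc[1] = { 0 };

	AMAP_SET_BITS(struct amap_example, ring_id, desc, 5);
	return AMAP_GET_BITS(struct amap_example, ring_id, desc); /* == 5 */
}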
#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = (u32 *) wrb;
	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif /* __BIG_ENDIAN */
}
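/*
 * Illustrative use (assumption, not from the original header): callers
 * byte-swap a whole descriptor to the little-endian layout the controller
 * expects before posting it, e.g.
 *
 *	be_dws_cpu_to_le(wrb, sizeof(*wrb));
 *
 * On little-endian builds the function body above is compiled out, so the
 * conversion costs nothing there.
 */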
static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

	return val;
}
static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

	return val;
}
static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
{
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
}
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}
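/*
 * Call pattern sketch (illustrative): derive a per-VF MAC from the PF
 * address, so each VF keeps the adapter's vendor OUI in bytes 0-2 while
 * bytes 3-5 come from the jhash of the full PF MAC.
 *
 *	u8 vf_mac[ETH_ALEN];
 *	be_vf_eth_addr_generate(adapter, vf_mac);
 */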
static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
	return adapter->num_rx_qs > 1;
}

static inline bool be_error(struct be_adapter *adapter)
{
	return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
}
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);

#endif /* BE_H */