/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_roce.h"
#include "qed_sp.h"
#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;
	bool frags_mapped;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};
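/* Tx completion callback for the cb_ops path: the cookie handed to
 * qed_ll2_prepare_tx_packet() is the skb itself, so completion only needs to
 * undo the head-fragment DMA mapping, invoke the registered tx_cb, and free
 * the skb.
 */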
static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	if (cdev->ll2->frags_mapped)
		/* Case where mapped frags were received, need to
		 * free the skb with nr_frags marked as 0
		 */
		skb_shinfo(skb)->nr_frags = 0;

	dev_kfree_skb_any(skb);
}
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}
static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}
static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
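/* Rx completion for the cb_ops path: the completed buffer is handed to the
 * protocol driver as a freshly built skb, and a replacement buffer is
 * allocated so the ring entry can be reposted immediately. On any failure
 * the original buffer is simply reposted and the packet is dropped.
 */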
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					struct qed_ll2_rx_packet *p_pkt,
					struct core_rx_fast_path_cqe *p_cqe,
					bool b_last_packet)
{
	u16 packet_length = le16_to_cpu(p_cqe->packet_length);
	struct qed_ll2_buffer *buffer = p_pkt->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	u16 vlan = le16_to_cpu(p_cqe->vlan);
	u32 opaque_data_0, opaque_data_1;
	u8 pad = p_cqe->placement_offset;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
	opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)p_pkt->rx_buf_addr, pad, packet_length,
		   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
		   opaque_data_0, opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, packet_length, false);
	}

	/* Determine if data is valid */
	if (packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for the buffer; reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	pad += NET_SKB_PAD;
	skb_reserve(skb, pad);
	skb_put(skb, packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      opaque_data_0, opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
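/* Connection-handle lookup helpers. __qed_ll2_handle_sanity() validates the
 * handle and optionally takes the per-connection mutex and/or requires the
 * connection to be active; the wrappers below encode the common
 * combinations.
 */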
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}
static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			if (p_ll2_conn->conn.gsi_enable)
				qed_ll2b_release_tx_gsi_packet(p_hwfn,
							       p_ll2_conn->my_id,
							       p_pkt->cookie,
							       tx_frag,
							       b_last_frag,
							       b_last_packet);
			else
				qed_ll2b_complete_tx_packet(p_hwfn,
							    p_ll2_conn->my_id,
							    p_pkt->cookie,
							    tx_frag,
							    b_last_frag,
							    b_last_packet);
		}
	}
}
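/* Tx completion from the status block: the firmware consumer index is
 * compared against the driver's BD index using s16 arithmetic, so the
 * difference stays correct across the natural u16 wraparound of the chain
 * indices.
 */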
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	dma_addr_t tx_frag;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->conn.gsi_enable)
			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
							p_ll2_conn->my_id,
							p_pkt->cookie,
							tx_frag,
							b_last_frag,
							!num_bds);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag, !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}
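/* The two Rx CQE flavors below are handled under p_rx->lock; the lock is
 * dropped around the upper-layer completion call and re-taken afterwards,
 * which is why the lock flags are passed down into these helpers.
 */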
static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_info *p_ll2_info,
			   union core_rx_cqe_union *p_cqe,
			   unsigned long lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u16 packet_length, parse_flags, vlan;
	u32 src_mac_addrhi;
	u16 src_mac_addrlo;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "GSI Rx completion but active_descq is empty\n");
		return -EIO;
	}

	list_del(&p_pkt->list_entry);
	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
					p_ll2_info->my_id,
					p_pkt->cookie,
					p_pkt->rx_buf_addr,
					packet_length,
					p_cqe->rx_cqe_gsi.data_length_error,
					parse_flags,
					vlan,
					src_mac_addrhi,
					src_mac_addrlo, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}
static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
				      struct qed_ll2_info *p_ll2_conn,
				      union core_rx_cqe_union *p_cqe,
				      unsigned long *p_lock_flags,
				      bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "LL2 Rx completion but active_descq is empty\n");
		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}
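/* Rx interrupt handler: drains the RCQ between the driver's consumer index
 * and the firmware producer mirrored in *p_fw_cons, dispatching each CQE by
 * type.
 */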
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
							cqe, &flags,
							b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}
static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		dma_addr_t rx_buf_addr;
		void *cookie;
		bool b_last;

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			rx_buf_addr = p_pkt->rx_buf_addr;
			cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
		}
	}
}
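/* iSCSI out-of-order (OOO) support. Out-of-order TCP segments received by
 * firmware are parked in "isles" tracked by the qed_ooo layer, looped back
 * through a CORE_TX_DEST_LB Tx queue once they can be delivered in order,
 * and their buffers are then recycled back onto the Rx ring.
 */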
#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);

	return bd_flags;
}
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle +
						       1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}
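/* Buffers that became deliverable ("ready") are re-sent on the loopback Tx
 * queue one BD at a time; the Rx parse flags are translated back into Tx BD
 * flags so VLAN handling is preserved across the loopback.
 */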
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
					       p_buffer->vlan, bd_flags,
					       l4_hdr_offset_w,
					       p_ll2_conn->conn.tx_dest, 0,
					       first_frag,
					       p_buffer->packet_length,
					       p_buffer, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}
static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return rc;

	if (!rx_num_ooo_buffers)
		return -EINVAL;

	for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}
static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}
static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}
static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}
static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_conn ll2_info = { 0 };
	int rc;

	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = OOO_LB_TC;
	ll2_info.tx_dest = CORE_TX_DEST_LB;

	rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
				    struct qed_ll2_params *params)
{
	return -EINVAL;
}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
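/* Queue start/stop is done via CORE ramrods posted on the slowpath queue
 * (SPQ); each helper below builds the ramrod data and posts it in EBLOCK
 * mode, i.e. the call returns only after the firmware completes it.
 */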
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base,
		       p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
									  : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->conn.tx_tc) {
	case LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case OOO_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
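/* Rx uses two chains: a next-page BD chain the driver produces buffers to,
 * and a PBL-based RCQ chain the firmware produces completions to. The shadow
 * descriptor arrays are sized to chain capacity.
 */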
static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, rx_num_desc);

out:
	return rc;
}
static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info,
					 u16 tx_num_desc)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  tx_num_desc);
	return rc;
}
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_conn *p_params,
			       u16 rx_num_desc,
			       u16 tx_num_desc,
			       u8 *p_connection_handle)
{
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	int rc = -EINVAL;
	u8 i;

	if (!p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	p_ll2_info->conn = *p_params;

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    rx_num_desc * 2, p_params->mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);

	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
		  p_ll2_conn->conn.ai_err_packet_too_big);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}
int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8906, 0,
					    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8914, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}
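/* Rx buffer posting: producer updates are written directly to the TSDM
 * "Rx prods" GTT window. Both the BD and the CQE producer are carried in a
 * single 32-bit store, so they reach the firmware atomically.
 */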
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}
int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  u8 num_of_bds,
					  dma_addr_t first_frag,
					  u16 first_frag_len, void *p_cookie,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = p_cookie;
	p_curp->bd_used = num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
	p_tx->cur_send_frag_num++;
}
static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 u8 num_of_bds,
				 enum core_tx_dest tx_dest,
				 u16 vlan,
				 u8 bd_flags,
				 u16 l4_hdr_offset_w,
				 enum core_roce_flavor_type roce_flavor,
				 dma_addr_t first_frag,
				 u16 first_frag_len)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	u16 bd_data = 0, frag_idx;

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	bd_data |= bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, first_frag);
	start_bd->nbytes = cpu_to_le16(first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->conn.conn_type,
		   prod_idx,
		   first_frag_len,
		   num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}
/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
}
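/* Tx submission is a three-step API: qed_ll2_prepare_tx_packet() claims a
 * descriptor and fills the first BD, qed_ll2_set_fragment_of_tx_packet()
 * adds one BD per additional fragment, and qed_ll2_tx_packet_notify() rings
 * the XCM doorbell once all BDs of the packet are in place.
 */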
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
			      u8 connection_handle,
			      u8 num_of_bds,
			      u16 vlan,
			      u8 bd_flags,
			      u16 l4_hdr_offset_w,
			      enum qed_ll2_tx_dest e_tx_dest,
			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
			      dma_addr_t first_frag,
			      u16 first_frag_len, void *cookie, u8 notify_fw)
{
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	enum core_roce_flavor_type roce_flavor;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	enum core_tx_dest tx_dest;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
						    CORE_TX_DEST_LB;
	if (qed_roce_flavor == QED_LL2_ROCE) {
		roce_flavor = CORE_ROCE;
	} else if (qed_roce_flavor == QED_LL2_RROCE) {
		roce_flavor = CORE_RROCE;
	} else {
		roce_flavor = CORE_ROCE;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
				      num_of_bds, first_frag,
				      first_frag_len, cookie, notify_fw);
	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
					 num_of_bds, tx_dest,
					 vlan, bd_flags, l4_hdr_offset_w,
					 roce_flavor,
					 first_frag, first_frag_len);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}
int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}
int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_txq_flush(p_hwfn, connection_handle);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_rxq_flush(p_hwfn, connection_handle);
	}

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       0x8906, 0,
					       QED_LLH_FILTER_ETHERTYPE);
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       0x8914, 0,
					       QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}
void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	kfree(p_ll2_conn->tx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;
	return 0;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}
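/* Statistics are read straight from the storm RAMs: TSTORM (Rx discard
 * counters), USTORM (Rx unicast/multicast/broadcast counters) and PSTORM
 * (Tx counters), each indexed by queue/stats id.
 */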
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
			HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}
static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}
int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}
static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_conn ll2_info;
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ptt *p_ptt;
	int rc, i;
	u8 gsi_enable = 1;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;
	cdev->ll2->frags_mapped = params->frags_mapped;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		gsi_enable = 0;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		gsi_enable = 0;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	/* Prepare the temporary ll2 information */
	memset(&ll2_info, 0, sizeof(ll2_info));

	ll2_info.conn_type = conn_type;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = 0;
	ll2_info.tx_dest = CORE_TX_DEST_NW;
	ll2_info.gsi_enable = gsi_enable;

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					&cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}
static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}
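/* Transmit entry point for the cb_ops interface. The skb head is always
 * mapped here; page fragments are mapped per-fragment unless the caller
 * declared them pre-mapped via params->frags_mapped at qed_ll2_start() time.
 */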
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
	const skb_frag_t *frag;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;
	u8 flags = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + skb_shinfo(skb)->nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
				       cdev->ll2->handle,
				       1 + skb_shinfo(skb)->nr_frags,
				       vlan, flags, 0, QED_LL2_TX_DEST_NW,
				       0 /* RoCE FLAVOR */,
				       mapping, skb->len, skb, 1);
	if (rc)
		goto err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		if (!cdev->ll2->frags_mapped) {
			mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(&cdev->pdev->dev,
						       mapping))) {
				DP_NOTICE(cdev,
					  "Unable to map frag - dropping packet\n");
				rc = -ENOMEM;
				goto err;
			}
		} else {
			mapping = page_to_phys(skb_frag_page(frag)) |
			    frag->page_offset;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* If this failed, there's not much to do here: a partial
		 * packet has been posted and we can't free memory; we will
		 * need to wait for completion.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);

err2:
	return rc;
}
static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}