/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

#define QED_LL2_RX_REGISTERED(ll2)      ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)      ((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
        int rx_cnt;
        u32 rx_size;
        u8 handle;
        bool frags_mapped;

        /* Lock protecting the LL2 buffer lists in non-sleeping context */
        spinlock_t lock;
        struct list_head list;

        const struct qed_ll2_cb_ops *cbs;
        void *cb_cookie;
};

struct qed_ll2_buffer {
        struct list_head list;
        void *data;
        dma_addr_t phys_addr;
};

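/* Tx-completion callback used by the qed module itself: unmap the first
 * fragment, hand the skb to the protocol driver's tx_cb (if one is
 * registered) and free the skb. When the caller mapped the fragments
 * itself, nr_frags is cleared first so freeing the skb does not touch
 * the frag pages.
 */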
static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
                                        u8 connection_handle,
                                        void *cookie,
                                        dma_addr_t first_frag_addr,
                                        bool b_last_fragment,
                                        bool b_last_packet)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        struct sk_buff *skb = cookie;

        /* All we need to do is release the mapping */
        dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
                         skb_headlen(skb), DMA_TO_DEVICE);

        if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
                cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
                                      b_last_fragment);

        if (cdev->ll2->frags_mapped)
                /* Case where mapped frags were received, need to
                 * free skb with nr_frags marked as 0
                 */
                skb_shinfo(skb)->nr_frags = 0;

        dev_kfree_skb_any(skb);
}

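/* Allocate the data area for an LL2 Rx buffer and create its streaming
 * DMA mapping. The mapping starts past the NET_SKB_PAD headroom, which
 * is later reserved when the buffer is wrapped by build_skb().
 */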
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
                                u8 **data, dma_addr_t *phys_addr)
{
        *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
        if (!(*data)) {
                DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
                return -ENOMEM;
        }

        *phys_addr = dma_map_single(&cdev->pdev->dev,
                                    ((*data) + NET_SKB_PAD),
                                    cdev->ll2->rx_size, DMA_FROM_DEVICE);
        if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
                DP_INFO(cdev, "Failed to map LL2 buffer data\n");
                kfree((*data));
                return -ENOMEM;
        }

        return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
                                  struct qed_ll2_buffer *buffer)
{
        spin_lock_bh(&cdev->ll2->lock);

        dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
                         cdev->ll2->rx_size, DMA_FROM_DEVICE);
        kfree(buffer->data);
        list_del(&buffer->list);

        cdev->ll2->rx_cnt--;
        if (!cdev->ll2->rx_cnt)
                DP_INFO(cdev, "All LL2 entries were removed\n");

        spin_unlock_bh(&cdev->ll2->lock);

        return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
        struct qed_ll2_buffer *buffer, *tmp_buffer;

        list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
                qed_ll2_dealloc_buffer(cdev, buffer);
}

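/* Rx-completion callback used by the qed module itself. A replacement
 * buffer is allocated first; if that fails, or the packet is shorter
 * than an Ethernet header, the original buffer is simply reposted to
 * the firmware. Otherwise the data is wrapped in an skb via build_skb()
 * and handed to the protocol driver's rx_cb.
 */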
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
                                        u8 connection_handle,
                                        struct qed_ll2_rx_packet *p_pkt,
                                        struct core_rx_fast_path_cqe *p_cqe,
                                        bool b_last_packet)
{
        u16 packet_length = le16_to_cpu(p_cqe->packet_length);
        struct qed_ll2_buffer *buffer = p_pkt->cookie;
        struct qed_dev *cdev = p_hwfn->cdev;
        u16 vlan = le16_to_cpu(p_cqe->vlan);
        u32 opaque_data_0, opaque_data_1;
        u8 pad = p_cqe->placement_offset;
        dma_addr_t new_phys_addr;
        struct sk_buff *skb;
        bool reuse = false;
        int rc = -EINVAL;
        u8 *new_data;

        opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
        opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);

        DP_VERBOSE(p_hwfn,
                   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
                   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
                   (u64)p_pkt->rx_buf_addr, pad, packet_length,
                   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
                   opaque_data_0, opaque_data_1);

        if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
                print_hex_dump(KERN_INFO, "",
                               DUMP_PREFIX_OFFSET, 16, 1,
                               buffer->data, packet_length, false);
        }

        /* Determine if data is valid */
        if (packet_length < ETH_HLEN)
                reuse = true;

        /* Allocate a replacement buffer; reuse the old one upon failure */
        if (!reuse)
                rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
                                          &new_phys_addr);

        /* If the buffer is to be reused, or there's no replacement, repost it */
        if (rc)
                goto out_post;

        skb = build_skb(buffer->data, 0);
        if (!skb) {
                rc = -ENOMEM;
                goto out_post;
        }

        pad += NET_SKB_PAD;
        skb_reserve(skb, pad);
        skb_put(skb, packet_length);
        skb_checksum_none_assert(skb);

        /* Get partial Ethernet information instead of eth_type_trans(),
         * since we don't have an associated net_device.
         */
        skb_reset_mac_header(skb);
        skb->protocol = eth_hdr(skb)->h_proto;

        /* Pass SKB onward */
        if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
                if (vlan)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
                cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
                                      opaque_data_0, opaque_data_1);
        }

        /* Update Buffer information and update FW producer */
        buffer->data = new_data;
        buffer->phys_addr = new_phys_addr;

out_post:
        rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
                                    buffer->phys_addr, 0, buffer, 1);

        if (rc)
                qed_ll2_dealloc_buffer(cdev, buffer);
}

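/* Look up a connection by handle, optionally under the connection mutex
 * and optionally only if the connection is currently active. Returns
 * NULL for an out-of-range handle or when the active check fails.
 */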
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
                                                    u8 connection_handle,
                                                    bool b_lock,
                                                    bool b_only_active)
{
        struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

        if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
                return NULL;

        if (!p_hwfn->p_ll2_info)
                return NULL;

        p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

        if (b_only_active) {
                if (b_lock)
                        mutex_lock(&p_ll2_conn->mutex);
                if (p_ll2_conn->b_active)
                        p_ret = p_ll2_conn;
                if (b_lock)
                        mutex_unlock(&p_ll2_conn->mutex);
        } else {
                p_ret = p_ll2_conn;
        }

        return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
                                                  u8 connection_handle)
{
        return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
                                                       u8 connection_handle)
{
        return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
                                                           *p_hwfn,
                                                           u8 connection_handle)
{
        return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

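/* Drain all packets still sitting on the Tx active list, completing each
 * one towards its owner. OOO buffers go back to the free-buffer pool;
 * everything else is completed through the GSI or regular Tx-done path.
 */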
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
        bool b_last_packet = false, b_last_frag = false;
        struct qed_ll2_tx_packet *p_pkt = NULL;
        struct qed_ll2_info *p_ll2_conn;
        struct qed_ll2_tx_queue *p_tx;
        dma_addr_t tx_frag;

        p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
        if (!p_ll2_conn)
                return;

        p_tx = &p_ll2_conn->tx_queue;

        while (!list_empty(&p_tx->active_descq)) {
                p_pkt = list_first_entry(&p_tx->active_descq,
                                         struct qed_ll2_tx_packet, list_entry);
                if (!p_pkt)
                        break;

                list_del(&p_pkt->list_entry);
                b_last_packet = list_empty(&p_tx->active_descq);
                list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
                if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
                        struct qed_ooo_buffer *p_buffer;

                        p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
                        qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
                                                p_buffer);
                } else {
                        p_tx->cur_completing_packet = *p_pkt;
                        p_tx->cur_completing_bd_idx = 1;
                        b_last_frag =
                                p_tx->cur_completing_bd_idx == p_pkt->bd_used;
                        tx_frag = p_pkt->bds_set[0].tx_frag;
                        if (p_ll2_conn->conn.gsi_enable)
                                qed_ll2b_release_tx_gsi_packet(p_hwfn,
                                                               p_ll2_conn->my_id,
                                                               p_pkt->cookie,
                                                               tx_frag,
                                                               b_last_frag,
                                                               b_last_packet);
                        else
                                qed_ll2b_complete_tx_packet(p_hwfn,
                                                            p_ll2_conn->my_id,
                                                            p_pkt->cookie,
                                                            tx_frag,
                                                            b_last_frag,
                                                            b_last_packet);
                }
        }
}

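/* Tx SB completion handler: walk the firmware consumer index forward,
 * retiring one packet per iteration. The queue lock is dropped around
 * the per-packet completion callbacks and re-taken afterwards.
 */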
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
        struct qed_ll2_info *p_ll2_conn = p_cookie;
        struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
        u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
        struct qed_ll2_tx_packet *p_pkt;
        bool b_last_frag = false;
        unsigned long flags;
        dma_addr_t tx_frag;
        int rc = -EINVAL;

        spin_lock_irqsave(&p_tx->lock, flags);
        if (p_tx->b_completing_packet) {
                rc = -EBUSY;
                goto out;
        }

        new_idx = le16_to_cpu(*p_tx->p_fw_cons);
        num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
        while (num_bds) {
                if (list_empty(&p_tx->active_descq))
                        goto out;

                p_pkt = list_first_entry(&p_tx->active_descq,
                                         struct qed_ll2_tx_packet, list_entry);
                if (!p_pkt)
                        goto out;

                p_tx->b_completing_packet = true;
                p_tx->cur_completing_packet = *p_pkt;
                num_bds_in_packet = p_pkt->bd_used;
                list_del(&p_pkt->list_entry);

                if (num_bds < num_bds_in_packet) {
                        DP_NOTICE(p_hwfn,
                                  "Rest of BDs does not cover whole packet\n");
                        goto out;
                }

                num_bds -= num_bds_in_packet;
                p_tx->bds_idx += num_bds_in_packet;
                while (num_bds_in_packet--)
                        qed_chain_consume(&p_tx->txq_chain);

                p_tx->cur_completing_bd_idx = 1;
                b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
                list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

                spin_unlock_irqrestore(&p_tx->lock, flags);
                tx_frag = p_pkt->bds_set[0].tx_frag;
                if (p_ll2_conn->conn.gsi_enable)
                        qed_ll2b_complete_tx_gsi_packet(p_hwfn,
                                                        p_ll2_conn->my_id,
                                                        p_pkt->cookie,
                                                        tx_frag,
                                                        b_last_frag, !num_bds);
                else
                        qed_ll2b_complete_tx_packet(p_hwfn,
                                                    p_ll2_conn->my_id,
                                                    p_pkt->cookie,
                                                    tx_frag,
                                                    b_last_frag, !num_bds);
                spin_lock_irqsave(&p_tx->lock, flags);
        }

        p_tx->b_completing_packet = false;
        rc = 0;
out:
        spin_unlock_irqrestore(&p_tx->lock, flags);
        return rc;
}

static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
                           struct qed_ll2_info *p_ll2_info,
                           union core_rx_cqe_union *p_cqe,
                           unsigned long lock_flags, bool b_last_cqe)
{
        struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
        struct qed_ll2_rx_packet *p_pkt = NULL;
        u16 packet_length, parse_flags, vlan;
        u32 src_mac_addrhi;
        u16 src_mac_addrlo;

        if (!list_empty(&p_rx->active_descq))
                p_pkt = list_first_entry(&p_rx->active_descq,
                                         struct qed_ll2_rx_packet, list_entry);
        if (!p_pkt) {
                DP_NOTICE(p_hwfn,
                          "GSI Rx completion but active_descq is empty\n");
                return -EIO;
        }

        list_del(&p_pkt->list_entry);
        parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
        packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
        vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
        src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
        src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
        if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
                DP_NOTICE(p_hwfn,
                          "Mismatch between active_descq and the LL2 Rx chain\n");
        list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

        spin_unlock_irqrestore(&p_rx->lock, lock_flags);
        qed_ll2b_complete_rx_gsi_packet(p_hwfn,
                                        p_ll2_info->my_id,
                                        p_pkt->cookie,
                                        p_pkt->rx_buf_addr,
                                        packet_length,
                                        p_cqe->rx_cqe_gsi.data_length_error,
                                        parse_flags,
                                        vlan,
                                        src_mac_addrhi,
                                        src_mac_addrlo, b_last_cqe);
        spin_lock_irqsave(&p_rx->lock, lock_flags);

        return 0;
}

static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
                                      struct qed_ll2_info *p_ll2_conn,
                                      union core_rx_cqe_union *p_cqe,
                                      unsigned long lock_flags,
                                      bool b_last_cqe)
{
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
        struct qed_ll2_rx_packet *p_pkt = NULL;

        if (!list_empty(&p_rx->active_descq))
                p_pkt = list_first_entry(&p_rx->active_descq,
                                         struct qed_ll2_rx_packet, list_entry);
        if (!p_pkt) {
                DP_NOTICE(p_hwfn,
                          "LL2 Rx completion but active_descq is empty\n");
                return -EIO;
        }
        list_del(&p_pkt->list_entry);

        if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
                DP_NOTICE(p_hwfn,
                          "Mismatch between active_descq and the LL2 Rx chain\n");
        list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

        spin_unlock_irqrestore(&p_rx->lock, lock_flags);
        qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
                                    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
        spin_lock_irqsave(&p_rx->lock, lock_flags);

        return 0;
}

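/* Rx SB completion handler: consume CQEs from the Rx completion queue
 * until the chain consumer catches up with the firmware consumer,
 * dispatching each CQE by type (slow path, GSI offload or regular).
 */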
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
        struct qed_ll2_info *p_ll2_conn = cookie;
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
        union core_rx_cqe_union *cqe = NULL;
        u16 cq_new_idx = 0, cq_old_idx = 0;
        unsigned long flags = 0;
        int rc = 0;

        spin_lock_irqsave(&p_rx->lock, flags);
        cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
        cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

        while (cq_new_idx != cq_old_idx) {
                bool b_last_cqe = (cq_new_idx == cq_old_idx);

                cqe = qed_chain_consume(&p_rx->rcq_chain);
                cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

                DP_VERBOSE(p_hwfn,
                           QED_MSG_LL2,
                           "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
                           cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

                switch (cqe->rx_cqe_sp.type) {
                case CORE_RX_CQE_TYPE_SLOW_PATH:
                        DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
                        rc = -EINVAL;
                        break;
                case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
                        rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
                                                        cqe, flags, b_last_cqe);
                        break;
                case CORE_RX_CQE_TYPE_REGULAR:
                        rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
                                                        cqe, flags, b_last_cqe);
                        break;
                default:
                        rc = -EIO;
                }
        }

        spin_unlock_irqrestore(&p_rx->lock, flags);
        return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
        struct qed_ll2_info *p_ll2_conn = NULL;
        struct qed_ll2_rx_packet *p_pkt = NULL;
        struct qed_ll2_rx_queue *p_rx;

        p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
        if (!p_ll2_conn)
                return;

        p_rx = &p_ll2_conn->rx_queue;

        while (!list_empty(&p_rx->active_descq)) {
                dma_addr_t rx_buf_addr;
                void *cookie;
                bool b_last;

                p_pkt = list_first_entry(&p_rx->active_descq,
                                         struct qed_ll2_rx_packet, list_entry);
                if (!p_pkt)
                        break;

                list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

                if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
                        struct qed_ooo_buffer *p_buffer;

                        p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
                        qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
                                                p_buffer);
                } else {
                        rx_buf_addr = p_pkt->rx_buf_addr;
                        cookie = p_pkt->cookie;

                        b_last = list_empty(&p_rx->active_descq);
                }
        }
}

#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
        u8 bd_flags = 0;

        if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
                SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);

        return bd_flags;
}

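/* Loopback Rx handler for the iSCSI out-of-order (OOO) connection: each
 * CQE carries an ooo_opaque cookie that tells us how the received buffer
 * fits into the reassembly "isles": drop an isle, start a new one,
 * extend one on the left or right, join two, or append to the peninsula
 * of in-order data.
 */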
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
                                  struct qed_ll2_info *p_ll2_conn)
{
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
        u16 packet_length = 0, parse_flags = 0, vlan = 0;
        struct qed_ll2_rx_packet *p_pkt = NULL;
        u32 num_ooo_add_to_peninsula = 0, cid;
        union core_rx_cqe_union *cqe = NULL;
        u16 cq_new_idx = 0, cq_old_idx = 0;
        struct qed_ooo_buffer *p_buffer;
        struct ooo_opaque *iscsi_ooo;
        u8 placement_offset = 0;
        u8 cqe_type;

        cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
        cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
        if (cq_new_idx == cq_old_idx)
                return 0;

        while (cq_new_idx != cq_old_idx) {
                struct core_rx_fast_path_cqe *p_cqe_fp;

                cqe = qed_chain_consume(&p_rx->rcq_chain);
                cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
                cqe_type = cqe->rx_cqe_sp.type;

                if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
                        DP_NOTICE(p_hwfn,
                                  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
                                  cqe_type);
                        return -EINVAL;
                }
                p_cqe_fp = &cqe->rx_cqe_fp;

                placement_offset = p_cqe_fp->placement_offset;
                parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
                packet_length = le16_to_cpu(p_cqe_fp->packet_length);
                vlan = le16_to_cpu(p_cqe_fp->vlan);
                iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
                qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
                                           iscsi_ooo);
                cid = le32_to_cpu(iscsi_ooo->cid);

                /* Process delete isle first */
                if (iscsi_ooo->drop_size)
                        qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
                                             iscsi_ooo->drop_isle,
                                             iscsi_ooo->drop_size);

                if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
                        continue;

                /* Now process create/add/join isles */
                if (list_empty(&p_rx->active_descq)) {
                        DP_NOTICE(p_hwfn,
                                  "LL2 OOO RX chain has no submitted buffers\n");
                        return -EIO;
                }

                p_pkt = list_first_entry(&p_rx->active_descq,
                                         struct qed_ll2_rx_packet, list_entry);

                if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
                    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
                    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
                    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
                    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
                        if (!p_pkt) {
                                DP_NOTICE(p_hwfn,
                                          "LL2 OOO RX packet is not valid\n");
                                return -EIO;
                        }
                        list_del(&p_pkt->list_entry);
                        p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
                        p_buffer->packet_length = packet_length;
                        p_buffer->parse_flags = parse_flags;
                        p_buffer->vlan = vlan;
                        p_buffer->placement_offset = placement_offset;
                        qed_chain_consume(&p_rx->rxq_chain);
                        list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

                        switch (iscsi_ooo->ooo_opcode) {
                        case TCP_EVENT_ADD_NEW_ISLE:
                                qed_ooo_add_new_isle(p_hwfn,
                                                     p_hwfn->p_ooo_info,
                                                     cid,
                                                     iscsi_ooo->ooo_isle,
                                                     p_buffer);
                                break;
                        case TCP_EVENT_ADD_ISLE_RIGHT:
                                qed_ooo_add_new_buffer(p_hwfn,
                                                       p_hwfn->p_ooo_info,
                                                       cid,
                                                       iscsi_ooo->ooo_isle,
                                                       p_buffer,
                                                       QED_OOO_RIGHT_BUF);
                                break;
                        case TCP_EVENT_ADD_ISLE_LEFT:
                                qed_ooo_add_new_buffer(p_hwfn,
                                                       p_hwfn->p_ooo_info,
                                                       cid,
                                                       iscsi_ooo->ooo_isle,
                                                       p_buffer,
                                                       QED_OOO_LEFT_BUF);
                                break;
                        case TCP_EVENT_JOIN:
                                qed_ooo_add_new_buffer(p_hwfn,
                                                       p_hwfn->p_ooo_info,
                                                       cid,
                                                       iscsi_ooo->ooo_isle + 1,
                                                       p_buffer,
                                                       QED_OOO_LEFT_BUF);
                                qed_ooo_join_isles(p_hwfn,
                                                   p_hwfn->p_ooo_info,
                                                   cid, iscsi_ooo->ooo_isle);
                                break;
                        case TCP_EVENT_ADD_PEN:
                                num_ooo_add_to_peninsula++;
                                qed_ooo_put_ready_buffer(p_hwfn,
                                                         p_hwfn->p_ooo_info,
                                                         p_buffer, true);
                                break;
                        }
                } else {
                        DP_NOTICE(p_hwfn,
                                  "Unexpected event (%d) TX OOO completion\n",
                                  iscsi_ooo->ooo_opcode);
                }
        }

        return 0;
}

static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
                          struct qed_ll2_info *p_ll2_conn)
{
        struct qed_ooo_buffer *p_buffer;
        int rc;
        u16 l4_hdr_offset_w;
        dma_addr_t first_frag;
        u16 parse_flags;
        u8 bd_flags;

        /* Submit Tx buffers here */
        while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
                                                    p_hwfn->p_ooo_info))) {
                l4_hdr_offset_w = 0;
                bd_flags = 0;

                first_frag = p_buffer->rx_buffer_phys_addr +
                             p_buffer->placement_offset;
                parse_flags = p_buffer->parse_flags;
                bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
                SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
                SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);

                rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
                                               p_buffer->vlan, bd_flags,
                                               l4_hdr_offset_w,
                                               p_ll2_conn->conn.tx_dest, 0,
                                               first_frag,
                                               p_buffer->packet_length,
                                               p_buffer, true);
                if (rc) {
                        qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
                                                 p_buffer, false);
                        break;
                }
        }
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
                          struct qed_ll2_info *p_ll2_conn)
{
        struct qed_ooo_buffer *p_buffer;
        int rc;

        while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
                                                   p_hwfn->p_ooo_info))) {
                rc = qed_ll2_post_rx_buffer(p_hwfn,
                                            p_ll2_conn->my_id,
                                            p_buffer->rx_buffer_phys_addr,
                                            0, p_buffer, true);
                if (rc) {
                        qed_ooo_put_free_buffer(p_hwfn,
                                                p_hwfn->p_ooo_info, p_buffer);
                        break;
                }
        }
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
        struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
        int rc;

        rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
        if (rc)
                return rc;

        qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
        qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

        return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
        struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
        struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
        struct qed_ll2_tx_packet *p_pkt = NULL;
        struct qed_ooo_buffer *p_buffer;
        bool b_dont_submit_rx = false;
        u16 new_idx = 0, num_bds = 0;
        int rc;

        new_idx = le16_to_cpu(*p_tx->p_fw_cons);
        num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

        if (!num_bds)
                return 0;

        while (num_bds) {
                if (list_empty(&p_tx->active_descq))
                        return -EINVAL;

                p_pkt = list_first_entry(&p_tx->active_descq,
                                         struct qed_ll2_tx_packet, list_entry);
                if (!p_pkt)
                        return -EINVAL;

                if (p_pkt->bd_used != 1) {
                        DP_NOTICE(p_hwfn,
                                  "Unexpectedly many BDs(%d) in TX OOO completion\n",
                                  p_pkt->bd_used);
                        return -EINVAL;
                }

                list_del(&p_pkt->list_entry);

                num_bds--;
                p_tx->bds_idx++;
                qed_chain_consume(&p_tx->txq_chain);

                p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
                list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

                if (b_dont_submit_rx) {
                        qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
                                                p_buffer);
                        continue;
                }

                rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
                                            p_buffer->rx_buffer_phys_addr, 0,
                                            p_buffer, true);
                if (rc != 0) {
                        qed_ooo_put_free_buffer(p_hwfn,
                                                p_hwfn->p_ooo_info, p_buffer);
                        b_dont_submit_rx = true;
                }
        }

        qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

        return 0;
}

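/* Pre-allocate the DMA-coherent buffers used by the OOO connection. Each
 * buffer is sized for the MTU plus header room and rounded up to a
 * multiple of ETH_CACHE_LINE_SIZE.
 */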
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
                               struct qed_ll2_info *p_ll2_info,
                               u16 rx_num_ooo_buffers, u16 mtu)
{
        struct qed_ooo_buffer *p_buf = NULL;
        void *p_virt;
        u16 buf_idx;
        int rc = 0;

        if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
                return rc;

        if (!rx_num_ooo_buffers)
                return -EINVAL;

        for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
                p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
                if (!p_buf) {
                        rc = -ENOMEM;
                        goto out;
                }

                p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
                p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
                                         ETH_CACHE_LINE_SIZE - 1) &
                                        ~(ETH_CACHE_LINE_SIZE - 1);
                p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                            p_buf->rx_buffer_size,
                                            &p_buf->rx_buffer_phys_addr,
                                            GFP_KERNEL);
                if (!p_virt) {
                        kfree(p_buf);
                        rc = -ENOMEM;
                        goto out;
                }

                p_buf->rx_buffer_virt_addr = p_virt;
                qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
                   rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
        return rc;
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
                                 struct qed_ll2_info *p_ll2_conn)
{
        if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
                return;

        qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
        qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
                                           struct qed_ll2_info *p_ll2_conn)
{
        struct qed_ooo_buffer *p_buffer;

        if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
                return;

        qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
        while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
                                                   p_hwfn->p_ooo_info))) {
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_buffer->rx_buffer_size,
                                  p_buffer->rx_buffer_virt_addr,
                                  p_buffer->rx_buffer_phys_addr);
                kfree(p_buffer);
        }
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

        DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
                   *handle);

        qed_ll2_terminate_connection(hwfn, *handle);
        qed_ll2_release_connection(hwfn, *handle);
        *handle = QED_LL2_UNUSED_HANDLE;
}

static int qed_ll2_start_ooo(struct qed_dev *cdev,
                             struct qed_ll2_params *params)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
        struct qed_ll2_conn ll2_info;
        int rc;

        ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
        ll2_info.mtu = params->mtu;
        ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
        ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
        ll2_info.tx_tc = OOO_LB_TC;
        ll2_info.tx_dest = CORE_TX_DEST_LB;

        rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
                                        QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
                                        handle);
        if (rc) {
                DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
                goto out;
        }

        rc = qed_ll2_establish_connection(hwfn, *handle);
        if (rc) {
                DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
                goto fail;
        }

        return 0;

fail:
        qed_ll2_release_connection(hwfn, *handle);
out:
        *handle = QED_LL2_UNUSED_HANDLE;
        return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
                                     void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
                                     void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
                               struct qed_ll2_info *p_ll2_info,
                               u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
                                 struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
                               struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
                                    struct qed_ll2_params *params)
                                    { return -EINVAL; }
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */

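/* Post the CORE_RAMROD_RX_QUEUE_START ramrod that brings up the Rx queue
 * in firmware: it carries the status-block index, MTU, BD chain base,
 * CQE PBL address and the error-handling policy for this connection.
 */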
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
                                     struct qed_ll2_info *p_ll2_conn,
                                     u8 action_on_error)
{
        enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
        struct core_rx_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u16 cqe_pbl_size;
        int rc = 0;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_ll2_conn->cid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 CORE_RAMROD_RX_QUEUE_START,
                                 PROTOCOLID_CORE, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.core_rx_queue_start;

        p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
        p_ramrod->sb_index = p_rx->rx_sb_index;
        p_ramrod->complete_event_flg = 1;

        p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
        DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
        cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
        p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
                       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

        p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
        p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
        p_ramrod->queue_id = p_ll2_conn->queue_id;
        p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
                                                                          : 1;

        if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
            p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
                p_ramrod->mf_si_bcast_accept_all = 1;
                p_ramrod->mf_si_mcast_accept_all = 1;
        } else {
                p_ramrod->mf_si_bcast_accept_all = 0;
                p_ramrod->mf_si_mcast_accept_all = 0;
        }

        p_ramrod->action_on_error.error_type = action_on_error;
        p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
                                     struct qed_ll2_info *p_ll2_conn)
{
        enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
        struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
        struct core_tx_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        union qed_qm_pq_params pq_params;
        u16 pq_id = 0, pbl_size;
        int rc = -EINVAL;

        if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
                return 0;

        if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
                p_ll2_conn->tx_stats_en = 0;
        else
                p_ll2_conn->tx_stats_en = 1;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_ll2_conn->cid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 CORE_RAMROD_TX_QUEUE_START,
                                 PROTOCOLID_CORE, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.core_tx_queue_start;

        p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
        p_ramrod->sb_index = p_tx->tx_sb_index;
        p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
        p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
        p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
                       qed_chain_get_pbl_phys(&p_tx->txq_chain));
        pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
        p_ramrod->pbl_size = cpu_to_le16(pbl_size);

        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = p_ll2_conn->conn.tx_tc;
        pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

        switch (conn_type) {
        case QED_LL2_TYPE_FCOE:
                p_ramrod->conn_type = PROTOCOLID_FCOE;
                break;
        case QED_LL2_TYPE_ISCSI:
        case QED_LL2_TYPE_ISCSI_OOO:
                p_ramrod->conn_type = PROTOCOLID_ISCSI;
                break;
        case QED_LL2_TYPE_ROCE:
                p_ramrod->conn_type = PROTOCOLID_ROCE;
                break;
        default:
                p_ramrod->conn_type = PROTOCOLID_ETH;
                DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
        }

        p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
                                    struct qed_ll2_info *p_ll2_conn)
{
        struct core_rx_stop_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_ll2_conn->cid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 CORE_RAMROD_RX_QUEUE_STOP,
                                 PROTOCOLID_CORE, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

        p_ramrod->complete_event_flg = 1;
        p_ramrod->queue_id = p_ll2_conn->queue_id;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
                                    struct qed_ll2_info *p_ll2_conn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_ll2_conn->cid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 CORE_RAMROD_TX_QUEUE_STOP,
                                 PROTOCOLID_CORE, &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
                              struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
{
        struct qed_ll2_rx_packet *p_descq;
        u32 capacity;
        int rc = 0;

        if (!rx_num_desc)
                goto out;

        rc = qed_chain_alloc(p_hwfn->cdev,
                             QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                             QED_CHAIN_MODE_NEXT_PTR,
                             QED_CHAIN_CNT_TYPE_U16,
                             rx_num_desc,
                             sizeof(struct core_rx_bd),
                             &p_ll2_info->rx_queue.rxq_chain);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
                goto out;
        }

        capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
        p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
                          GFP_KERNEL);
        if (!p_descq) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
                goto out;
        }
        p_ll2_info->rx_queue.descq_array = p_descq;

        rc = qed_chain_alloc(p_hwfn->cdev,
                             QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                             QED_CHAIN_MODE_PBL,
                             QED_CHAIN_CNT_TYPE_U16,
                             rx_num_desc,
                             sizeof(struct core_rx_fast_path_cqe),
                             &p_ll2_info->rx_queue.rcq_chain);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
                goto out;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
                   p_ll2_info->conn.conn_type, rx_num_desc);

out:
        return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
                                         struct qed_ll2_info *p_ll2_info,
                                         u16 tx_num_desc)
{
        struct qed_ll2_tx_packet *p_descq;
        u32 capacity;
        int rc = 0;

        if (!tx_num_desc)
                goto out;

        rc = qed_chain_alloc(p_hwfn->cdev,
                             QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                             QED_CHAIN_MODE_PBL,
                             QED_CHAIN_CNT_TYPE_U16,
                             tx_num_desc,
                             sizeof(struct core_tx_bd),
                             &p_ll2_info->tx_queue.txq_chain);
        if (rc)
                goto out;

        capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
        p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
                          GFP_KERNEL);
        if (!p_descq) {
                rc = -ENOMEM;
                goto out;
        }
        p_ll2_info->tx_queue.descq_array = p_descq;

        DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
                   p_ll2_info->conn.conn_type, tx_num_desc);

out:
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
                          tx_num_desc);
        return rc;
}

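/* Claim a free connection slot, allocate its Rx/Tx descriptor chains
 * (plus OOO buffers when applicable) and register the SB completion
 * callbacks. On success, *p_connection_handle receives the slot index
 * used by all subsequent LL2 calls.
 */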
1297 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
1298                                struct qed_ll2_conn *p_params,
1299                                u16 rx_num_desc,
1300                                u16 tx_num_desc,
1301                                u8 *p_connection_handle)
1302 {
1303         qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
1304         struct qed_ll2_info *p_ll2_info = NULL;
1305         int rc;
1306         u8 i;
1307
1308         if (!p_connection_handle || !p_hwfn->p_ll2_info)
1309                 return -EINVAL;
1310
1311         /* Find a free connection to be used */
1312         for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
1313                 mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
1314                 if (p_hwfn->p_ll2_info[i].b_active) {
1315                         mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
1316                         continue;
1317                 }
1318
1319                 p_hwfn->p_ll2_info[i].b_active = true;
1320                 p_ll2_info = &p_hwfn->p_ll2_info[i];
1321                 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
1322                 break;
1323         }
1324         if (!p_ll2_info)
1325                 return -EBUSY;
1326
1327         p_ll2_info->conn = *p_params;
1328
1329         rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
1330         if (rc)
1331                 goto q_allocate_fail;
1332
1333         rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
1334         if (rc)
1335                 goto q_allocate_fail;
1336
1337         rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
1338                                             rx_num_desc * 2, p_params->mtu);
1339         if (rc)
1340                 goto q_allocate_fail;
1341
1342         /* Register callbacks for the Rx/Tx queues */
1343         if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
1344                 comp_rx_cb = qed_ll2_lb_rxq_completion;
1345                 comp_tx_cb = qed_ll2_lb_txq_completion;
1346         } else {
1347                 comp_rx_cb = qed_ll2_rxq_completion;
1348                 comp_tx_cb = qed_ll2_txq_completion;
1349         }
1350
1351         if (rx_num_desc) {
1352                 qed_int_register_cb(p_hwfn, comp_rx_cb,
1353                                     &p_hwfn->p_ll2_info[i],
1354                                     &p_ll2_info->rx_queue.rx_sb_index,
1355                                     &p_ll2_info->rx_queue.p_fw_cons);
1356                 p_ll2_info->rx_queue.b_cb_registred = true;
1357         }
1358
1359         if (tx_num_desc) {
1360                 qed_int_register_cb(p_hwfn,
1361                                     comp_tx_cb,
1362                                     &p_hwfn->p_ll2_info[i],
1363                                     &p_ll2_info->tx_queue.tx_sb_index,
1364                                     &p_ll2_info->tx_queue.p_fw_cons);
1365                 p_ll2_info->tx_queue.b_cb_registred = true;
1366         }
1367
1368         *p_connection_handle = i;
1369         return rc;
1370
1371 q_allocate_fail:
1372         qed_ll2_release_connection(p_hwfn, i);
1373         return -ENOMEM;
1374 }
1375
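/* Zero the Rx producers in storm RAM and start the Rx queue via ramrod,
 * encoding the caller's policy for packet-too-big and no-buffer errors
 * into action_on_error.
 */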
1376 static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
1377                                            struct qed_ll2_info *p_ll2_conn)
1378 {
1379         u8 action_on_error = 0;
1380
1381         if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
1382                 return 0;
1383
1384         DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
1385
1386         SET_FIELD(action_on_error,
1387                   CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
1388                   p_ll2_conn->conn.ai_err_packet_too_big);
1389         SET_FIELD(action_on_error,
1390                   CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
1391
1392         return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
1393 }
1394
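/* Bring an acquired connection into an operational state: reset the
 * Rx/Tx chains and descriptor lists, acquire a core CID, derive the
 * producer and doorbell addresses from the assigned queue id, and start
 * both queues through slowpath ramrods. FCoE connections additionally
 * get LLH ethertype filters for FCoE (0x8906) and FIP (0x8914).
 */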
1395 int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1396 {
1397         struct qed_ll2_info *p_ll2_conn;
1398         struct qed_ll2_rx_queue *p_rx;
1399         struct qed_ll2_tx_queue *p_tx;
1400         int rc = -EINVAL;
1401         u32 i, capacity;
1402         u8 qid;
1403
1404         p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1405         if (!p_ll2_conn)
1406                 return -EINVAL;
1407         p_rx = &p_ll2_conn->rx_queue;
1408         p_tx = &p_ll2_conn->tx_queue;
1409
1410         qed_chain_reset(&p_rx->rxq_chain);
1411         qed_chain_reset(&p_rx->rcq_chain);
1412         INIT_LIST_HEAD(&p_rx->active_descq);
1413         INIT_LIST_HEAD(&p_rx->free_descq);
1414         INIT_LIST_HEAD(&p_rx->posting_descq);
1415         spin_lock_init(&p_rx->lock);
1416         capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
1417         for (i = 0; i < capacity; i++)
1418                 list_add_tail(&p_rx->descq_array[i].list_entry,
1419                               &p_rx->free_descq);
1420         *p_rx->p_fw_cons = 0;
1421
1422         qed_chain_reset(&p_tx->txq_chain);
1423         INIT_LIST_HEAD(&p_tx->active_descq);
1424         INIT_LIST_HEAD(&p_tx->free_descq);
1425         INIT_LIST_HEAD(&p_tx->sending_descq);
1426         spin_lock_init(&p_tx->lock);
1427         capacity = qed_chain_get_capacity(&p_tx->txq_chain);
1428         for (i = 0; i < capacity; i++)
1429                 list_add_tail(&p_tx->descq_array[i].list_entry,
1430                               &p_tx->free_descq);
1431         p_tx->cur_completing_bd_idx = 0;
1432         p_tx->bds_idx = 0;
1433         p_tx->b_completing_packet = false;
1434         p_tx->cur_send_packet = NULL;
1435         p_tx->cur_send_frag_num = 0;
1436         p_tx->cur_completing_frag_num = 0;
1437         *p_tx->p_fw_cons = 0;
1438
1439         qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
1440
1441         qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
1442         p_ll2_conn->queue_id = qid;
1443         p_ll2_conn->tx_stats_id = qid;
1444         p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
1445                                             GTT_BAR0_MAP_REG_TSDM_RAM +
1446                                             TSTORM_LL2_RX_PRODS_OFFSET(qid);
1447         p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
1448                                             qed_db_addr(p_ll2_conn->cid,
1449                                                         DQ_DEMS_LEGACY);
1450
1451         rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
1452         if (rc)
1453                 return rc;
1454
1455         rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
1456         if (rc)
1457                 return rc;
1458
1459         if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
1460                 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
1461
1462         qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
1463
1464         if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
1465                 qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1466                                             0x8906, 0,
1467                                             QED_LLH_FILTER_ETHERTYPE);
1468                 qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1469                                             0x8914, 0,
1470                                             QED_LLH_FILTER_ETHERTYPE);
1471         }
1472
1473         return rc;
1474 }
1475
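/* Move everything queued on posting_descq [plus the optional p_curp] to
 * the active list and, if anything was moved, publish the new BD and CQE
 * producer values to the firmware. Called with the Rx spinlock held.
 */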
1476 static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
1477                                              struct qed_ll2_rx_queue *p_rx,
1478                                              struct qed_ll2_rx_packet *p_curp)
1479 {
1480         struct qed_ll2_rx_packet *p_posting_packet = NULL;
1481         struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
1482         bool b_notify_fw = false;
1483         u16 bd_prod, cq_prod;
1484
1485         /* This handles the flushing of already posted buffers */
1486         while (!list_empty(&p_rx->posting_descq)) {
1487                 p_posting_packet = list_first_entry(&p_rx->posting_descq,
1488                                                     struct qed_ll2_rx_packet,
1489                                                     list_entry);
1490                 list_move_tail(&p_posting_packet->list_entry,
1491                                &p_rx->active_descq);
1492                 b_notify_fw = true;
1493         }
1494
1495         /* This handles the supplied packet [if there is one] */
1496         if (p_curp) {
1497                 list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
1498                 b_notify_fw = true;
1499         }
1500
1501         if (!b_notify_fw)
1502                 return;
1503
1504         bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
1505         cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1506         rx_prod.bd_prod = cpu_to_le16(bd_prod);
1507         rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1508         DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1509 }
1510
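/* Post a single Rx buffer. Takes a free descriptor and a BD from the Rx
 * chain, fills in the buffer's DMA address and length, and either queues
 * the descriptor for a later batched producer update (!notify_fw) or
 * notifies the firmware immediately. Returns -EBUSY when descriptors or
 * chain elements run out, after flushing whatever is already pending.
 */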
1511 int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
1512                            u8 connection_handle,
1513                            dma_addr_t addr,
1514                            u16 buf_len, void *cookie, u8 notify_fw)
1515 {
1516         struct core_rx_bd_with_buff_len *p_curb = NULL;
1517         struct qed_ll2_rx_packet *p_curp = NULL;
1518         struct qed_ll2_info *p_ll2_conn;
1519         struct qed_ll2_rx_queue *p_rx;
1520         unsigned long flags;
1521         void *p_data;
1522         int rc = 0;
1523
1524         p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1525         if (!p_ll2_conn)
1526                 return -EINVAL;
1527         p_rx = &p_ll2_conn->rx_queue;
1528
1529         spin_lock_irqsave(&p_rx->lock, flags);
1530         if (!list_empty(&p_rx->free_descq))
1531                 p_curp = list_first_entry(&p_rx->free_descq,
1532                                           struct qed_ll2_rx_packet, list_entry);
1533         if (p_curp) {
1534                 if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
1535                     qed_chain_get_elem_left(&p_rx->rcq_chain)) {
1536                         p_data = qed_chain_produce(&p_rx->rxq_chain);
1537                         p_curb = (struct core_rx_bd_with_buff_len *)p_data;
1538                         qed_chain_produce(&p_rx->rcq_chain);
1539                 }
1540         }
1541
1542         /* If we're lacking entries, let's try to flush buffers to FW */
1543         if (!p_curp || !p_curb) {
1544                 rc = -EBUSY;
1545                 p_curp = NULL;
1546                 goto out_notify;
1547         }
1548
1549         /* We have an Rx packet we can fill */
1550         DMA_REGPAIR_LE(p_curb->addr, addr);
1551         p_curb->buff_length = cpu_to_le16(buf_len);
1552         p_curp->rx_buf_addr = addr;
1553         p_curp->cookie = cookie;
1554         p_curp->rxq_bd = p_curb;
1555         p_curp->buf_length = buf_len;
1556         list_del(&p_curp->list_entry);
1557
1558         /* Check if we only want to enqueue this packet without informing FW */
1559         if (!notify_fw) {
1560                 list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
1561                 goto out;
1562         }
1563
1564 out_notify:
1565         qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
1566 out:
1567         spin_unlock_irqrestore(&p_rx->lock, flags);
1568         return rc;
1569 }
1570
1571 static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1572                                           struct qed_ll2_tx_queue *p_tx,
1573                                           struct qed_ll2_tx_packet *p_curp,
1574                                           u8 num_of_bds,
1575                                           dma_addr_t first_frag,
1576                                           u16 first_frag_len, void *p_cookie,
1577                                           u8 notify_fw)
1578 {
1579         list_del(&p_curp->list_entry);
1580         p_curp->cookie = p_cookie;
1581         p_curp->bd_used = num_of_bds;
1582         p_curp->notify_fw = notify_fw;
1583         p_tx->cur_send_packet = p_curp;
1584         p_tx->cur_send_frag_num = 0;
1585
1586         p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
1587         p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
1588         p_tx->cur_send_frag_num++;
1589 }
1590
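/* Fill the packet's first BD with the VLAN tag, flags, L4 header offset,
 * RoCE flavor, BD count and first fragment, then pre-produce zeroed BDs
 * for the remaining fragments; those are completed later by
 * qed_ll2_set_fragment_of_tx_packet().
 */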
1591 static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1592                                              struct qed_ll2_info *p_ll2,
1593                                              struct qed_ll2_tx_packet *p_curp,
1594                                              u8 num_of_bds,
1595                                              enum core_tx_dest tx_dest,
1596                                              u16 vlan,
1597                                              u8 bd_flags,
1598                                              u16 l4_hdr_offset_w,
1599                                              enum core_roce_flavor_type type,
1600                                              dma_addr_t first_frag,
1601                                              u16 first_frag_len)
1602 {
1603         struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1604         u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1605         struct core_tx_bd *start_bd = NULL;
1606         u16 frag_idx;
1607
1608         start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1609         start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
1610         SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
1611                   cpu_to_le16(l4_hdr_offset_w));
1612         SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
1613         start_bd->bd_flags.as_bitfield = bd_flags;
1614         start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
1615             CORE_TX_BD_FLAGS_START_BD_SHIFT;
1616         SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
1617         SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
1618         DMA_REGPAIR_LE(start_bd->addr, first_frag);
1619         start_bd->nbytes = cpu_to_le16(first_frag_len);
1620
1621         DP_VERBOSE(p_hwfn,
1622                    (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1623                    "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1624                    p_ll2->queue_id,
1625                    p_ll2->cid,
1626                    p_ll2->conn.conn_type,
1627                    prod_idx,
1628                    first_frag_len,
1629                    num_of_bds,
1630                    le32_to_cpu(start_bd->addr.hi),
1631                    le32_to_cpu(start_bd->addr.lo));
1632
1633         if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
1634                 return;
1635
1636         /* Need to provide the packet with additional BDs for frags */
1637         for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
1638              frag_idx < num_of_bds; frag_idx++) {
1639                 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1640
1641                 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1642                 (*p_bd)->bd_flags.as_bitfield = 0;
1643                 (*p_bd)->bitfield1 = 0;
1644                 (*p_bd)->bitfield0 = 0;
1645                 p_curp->bds_set[frag_idx].tx_frag = 0;
1646                 p_curp->bds_set[frag_idx].frag_len = 0;
1647         }
1648 }
1649
1650 /* Must be called while the Txq spinlock is held */
1651 static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1652                                      struct qed_ll2_info *p_ll2_conn)
1653 {
1654         bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1655         struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1656         struct qed_ll2_tx_packet *p_pkt = NULL;
1657         struct core_db_data db_msg = { 0, 0, 0 };
1658         u16 bd_prod;
1659
1660         /* If there are missing BDs, don't do anything now */
1661         if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1662             p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1663                 return;
1664
1665         /* Push the current packet to the sending list and reset the send state */
1666         list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1667                       &p_ll2_conn->tx_queue.sending_descq);
1668         p_ll2_conn->tx_queue.cur_send_packet = NULL;
1669         p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1670
1671         /* Notify FW of packet only if requested to */
1672         if (!b_notify)
1673                 return;
1674
1675         bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1676
1677         while (!list_empty(&p_tx->sending_descq)) {
1678                 p_pkt = list_first_entry(&p_tx->sending_descq,
1679                                          struct qed_ll2_tx_packet, list_entry);
1680                 if (!p_pkt)
1681                         break;
1682
1683                 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
1684         }
1685
1686         SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1687         SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1688         SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1689                   DQ_XCM_CORE_TX_BD_PROD_CMD);
1690         db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1691         db_msg.spq_prod = cpu_to_le16(bd_prod);
1692
1693         /* Make sure the BD data is updated before ringing the doorbell */
1694         wmb();
1695
1696         DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1697
1698         DP_VERBOSE(p_hwfn,
1699                    (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1700                    "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1701                    p_ll2_conn->queue_id,
1702                    p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
1703 }
1704
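/* Begin transmission of a packet spanning num_of_bds fragments. Only a
 * single packet may be under preparation at any time (-EEXIST otherwise);
 * the first fragment is set here, and the doorbell is rung once all
 * fragments have been supplied - immediately for single-BD packets.
 */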
1705 int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
1706                               u8 connection_handle,
1707                               u8 num_of_bds,
1708                               u16 vlan,
1709                               u8 bd_flags,
1710                               u16 l4_hdr_offset_w,
1711                               enum qed_ll2_tx_dest e_tx_dest,
1712                               enum qed_ll2_roce_flavor_type qed_roce_flavor,
1713                               dma_addr_t first_frag,
1714                               u16 first_frag_len, void *cookie, u8 notify_fw)
1715 {
1716         struct qed_ll2_tx_packet *p_curp = NULL;
1717         struct qed_ll2_info *p_ll2_conn = NULL;
1718         enum core_roce_flavor_type roce_flavor;
1719         struct qed_ll2_tx_queue *p_tx;
1720         struct qed_chain *p_tx_chain;
1721         enum core_tx_dest tx_dest;
1722         unsigned long flags;
1723         int rc = 0;
1724
1725         p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1726         if (!p_ll2_conn)
1727                 return -EINVAL;
1728         p_tx = &p_ll2_conn->tx_queue;
1729         p_tx_chain = &p_tx->txq_chain;
1730
1731         if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
1732                 return -EIO;
1733
1734         spin_lock_irqsave(&p_tx->lock, flags);
1735         if (p_tx->cur_send_packet) {
1736                 rc = -EEXIST;
1737                 goto out;
1738         }
1739
1740         /* Get entry, but only if we have tx elements for it */
1741         if (!list_empty(&p_tx->free_descq))
1742                 p_curp = list_first_entry(&p_tx->free_descq,
1743                                           struct qed_ll2_tx_packet, list_entry);
1744         if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
1745                 p_curp = NULL;
1746
1747         if (!p_curp) {
1748                 rc = -EBUSY;
1749                 goto out;
1750         }
1751
1752         tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
1753                                                     CORE_TX_DEST_LB;
1754         if (qed_roce_flavor == QED_LL2_ROCE) {
1755                 roce_flavor = CORE_ROCE;
1756         } else if (qed_roce_flavor == QED_LL2_RROCE) {
1757                 roce_flavor = CORE_RROCE;
1758         } else {
1759                 rc = -EINVAL;
1760                 goto out;
1761         }
1762
1763         /* Prepare the packet and BDs, and possibly ring the FW doorbell */
1764         qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
1765                                       num_of_bds, first_frag,
1766                                       first_frag_len, cookie, notify_fw);
1767         qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
1768                                          num_of_bds, tx_dest,
1769                                          vlan, bd_flags, l4_hdr_offset_w,
1770                                          roce_flavor,
1771                                          first_frag, first_frag_len);
1772
1773         qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1774
1775 out:
1776         spin_unlock_irqrestore(&p_tx->lock, flags);
1777         return rc;
1778 }
1779
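/* Supply one additional fragment for the packet currently being
 * prepared. Once the last expected fragment arrives,
 * qed_ll2_tx_packet_notify() moves the packet to the sending list and
 * rings the doorbell.
 */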
1780 int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
1781                                       u8 connection_handle,
1782                                       dma_addr_t addr, u16 nbytes)
1783 {
1784         struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
1785         struct qed_ll2_info *p_ll2_conn = NULL;
1786         u16 cur_send_frag_num = 0;
1787         struct core_tx_bd *p_bd;
1788         unsigned long flags;
1789
1790         p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1791         if (!p_ll2_conn)
1792                 return -EINVAL;
1793
1794         if (!p_ll2_conn->tx_queue.cur_send_packet)
1795                 return -EINVAL;
1796
1797         p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1798         cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1799
1800         if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1801                 return -EINVAL;
1802
1803         /* Fill the BD information, and possibly notify FW */
1804         p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1805         DMA_REGPAIR_LE(p_bd->addr, addr);
1806         p_bd->nbytes = cpu_to_le16(nbytes);
1807         p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1808         p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1809
1810         p_ll2_conn->tx_queue.cur_send_frag_num++;
1811
1812         spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1813         qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1814         spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1815
1816         return 0;
1817 }
1818
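/* Stop an established connection: halt the Tx and Rx queues via ramrods,
 * flush their in-flight descriptors back to the completion callbacks,
 * and undo type-specific state (OOO isles, FCoE ethertype filters).
 */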
1819 int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1820 {
1821         struct qed_ll2_info *p_ll2_conn = NULL;
1822         int rc = -EINVAL;
1823
1824         p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1825         if (!p_ll2_conn)
1826                 return -EINVAL;
1827
1828         /* Stop Tx & Rx of connection, if needed */
1829         if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1830                 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1831                 if (rc)
1832                         return rc;
1833                 qed_ll2_txq_flush(p_hwfn, connection_handle);
1834         }
1835
1836         if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1837                 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1838                 if (rc)
1839                         return rc;
1840                 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1841         }
1842
1843         if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
1844                 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1845
1846         if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
1847                 qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1848                                                0x8906, 0,
1849                                                QED_LLH_FILTER_ETHERTYPE);
1850                 qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1851                                                0x8914, 0,
1852                                                QED_LLH_FILTER_ETHERTYPE);
1853         }
1854
1855         return rc;
1856 }
1857
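/* Free everything obtained by qed_ll2_acquire_connection(): interrupt
 * callbacks, descriptor arrays, BD/CQE chains, the CID and the OOO
 * buffers, then mark the slot inactive so the handle can be reused.
 */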
1858 void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1859 {
1860         struct qed_ll2_info *p_ll2_conn = NULL;
1861
1862         p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1863         if (!p_ll2_conn)
1864                 return;
1865
1866         if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1867                 p_ll2_conn->rx_queue.b_cb_registred = false;
1868                 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1869         }
1870
1871         if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1872                 p_ll2_conn->tx_queue.b_cb_registred = false;
1873                 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1874         }
1875
1876         kfree(p_ll2_conn->tx_queue.descq_array);
1877         qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1878
1879         kfree(p_ll2_conn->rx_queue.descq_array);
1880         qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1881         qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1882
1883         qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1884
1885         qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
1886
1887         mutex_lock(&p_ll2_conn->mutex);
1888         p_ll2_conn->b_active = false;
1889         mutex_unlock(&p_ll2_conn->mutex);
1890 }
1891
1892 struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
1893 {
1894         struct qed_ll2_info *p_ll2_connections;
1895         u8 i;
1896
1897         /* Allocate the set of LL2 connection structures */
1898         p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1899                                     sizeof(struct qed_ll2_info), GFP_KERNEL);
1900         if (!p_ll2_connections) {
1901                 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
1902                 return NULL;
1903         }
1904
1905         for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1906                 p_ll2_connections[i].my_id = i;
1907
1908         return p_ll2_connections;
1909 }
1910
1911 void qed_ll2_setup(struct qed_hwfn *p_hwfn,
1912                    struct qed_ll2_info *p_ll2_connections)
1913 {
1914         int i;
1915
1916         for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1917                 mutex_init(&p_ll2_connections[i].mutex);
1918 }
1919
1920 void qed_ll2_free(struct qed_hwfn *p_hwfn,
1921                   struct qed_ll2_info *p_ll2_connections)
1922 {
1923         kfree(p_ll2_connections);
1924 }
1925
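/* The three helpers below read the per-queue statistics that the Tstorm,
 * Ustorm and Pstorm engines maintain in BAR0-mapped SDM RAM, folding
 * each hi/lo register pair into a 64-bit host counter.
 */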
1926 static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1927                                 struct qed_ptt *p_ptt,
1928                                 struct qed_ll2_info *p_ll2_conn,
1929                                 struct qed_ll2_stats *p_stats)
1930 {
1931         struct core_ll2_tstorm_per_queue_stat tstats;
1932         u8 qid = p_ll2_conn->queue_id;
1933         u32 tstats_addr;
1934
1935         memset(&tstats, 0, sizeof(tstats));
1936         tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1937                       CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1938         qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1939
1940         p_stats->packet_too_big_discard =
1941                         HILO_64_REGPAIR(tstats.packet_too_big_discard);
1942         p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1943 }
1944
1945 static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1946                                 struct qed_ptt *p_ptt,
1947                                 struct qed_ll2_info *p_ll2_conn,
1948                                 struct qed_ll2_stats *p_stats)
1949 {
1950         struct core_ll2_ustorm_per_queue_stat ustats;
1951         u8 qid = p_ll2_conn->queue_id;
1952         u32 ustats_addr;
1953
1954         memset(&ustats, 0, sizeof(ustats));
1955         ustats_addr = BAR0_MAP_REG_USDM_RAM +
1956                       CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1957         qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1958
1959         p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1960         p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1961         p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1962         p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1963         p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1964         p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1965 }
1966
1967 static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1968                                 struct qed_ptt *p_ptt,
1969                                 struct qed_ll2_info *p_ll2_conn,
1970                                 struct qed_ll2_stats *p_stats)
1971 {
1972         struct core_ll2_pstorm_per_queue_stat pstats;
1973         u8 stats_id = p_ll2_conn->tx_stats_id;
1974         u32 pstats_addr;
1975
1976         memset(&pstats, 0, sizeof(pstats));
1977         pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1978                       CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1979         qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1980
1981         p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1982         p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1983         p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1984         p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1985         p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1986         p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1987 }
1988
1989 int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
1990                       u8 connection_handle, struct qed_ll2_stats *p_stats)
1991 {
1992         struct qed_ll2_info *p_ll2_conn = NULL;
1993         struct qed_ptt *p_ptt;
1994
1995         memset(p_stats, 0, sizeof(*p_stats));
1996
1997         if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
1998             !p_hwfn->p_ll2_info)
1999                 return -EINVAL;
2000
2001         p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
2002
2003         p_ptt = qed_ptt_acquire(p_hwfn);
2004         if (!p_ptt) {
2005                 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2006                 return -EINVAL;
2007         }
2008
2009         _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2010         _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2011         if (p_ll2_conn->tx_stats_en)
2012                 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2013
2014         qed_ptt_release(p_hwfn, p_ptt);
2015         return 0;
2016 }
2017
2018 static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2019                                     const struct qed_ll2_cb_ops *ops,
2020                                     void *cookie)
2021 {
2022         cdev->ll2->cbs = ops;
2023         cdev->ll2->cb_cookie = cookie;
2024 }
2025
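/* Driver-level start of the LL2 interface: pre-allocate QED_LL2_RX_SIZE
 * receive buffers, choose a connection type based on the PF personality,
 * acquire and establish the connection, post all Rx buffers to firmware,
 * start the iSCSI OOO queue where applicable, and install the LL2 MAC
 * filter.
 */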
2026 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2027 {
2028         struct qed_ll2_conn ll2_info;
2029         struct qed_ll2_buffer *buffer, *tmp_buffer;
2030         enum qed_ll2_conn_type conn_type;
2031         struct qed_ptt *p_ptt;
2032         int rc, i;
2033         u8 gsi_enable = 1;
2034
2035         /* Initialize LL2 locks & lists */
2036         INIT_LIST_HEAD(&cdev->ll2->list);
2037         spin_lock_init(&cdev->ll2->lock);
2038         cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
2039                              L1_CACHE_BYTES + params->mtu;
2040         cdev->ll2->frags_mapped = params->frags_mapped;
2041
2042         /* Allocate memory for LL2 */
2043         DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
2044                 cdev->ll2->rx_size);
2045         for (i = 0; i < QED_LL2_RX_SIZE; i++) {
2046                 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2047                 if (!buffer) {
2048                         DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
2049                         goto fail;
2050                 }
2051
2052                 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
2053                                           &buffer->phys_addr);
2054                 if (rc) {
2055                         kfree(buffer);
2056                         goto fail;
2057                 }
2058
2059                 list_add_tail(&buffer->list, &cdev->ll2->list);
2060         }
2061
2062         switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
2063         case QED_PCI_FCOE:
2064                 conn_type = QED_LL2_TYPE_FCOE;
2065                 gsi_enable = 0;
2066                 break;
2067         case QED_PCI_ISCSI:
2068                 conn_type = QED_LL2_TYPE_ISCSI;
2069                 gsi_enable = 0;
2070                 break;
2071         case QED_PCI_ETH_ROCE:
2072                 conn_type = QED_LL2_TYPE_ROCE;
2073                 break;
2074         default:
2075                 conn_type = QED_LL2_TYPE_TEST;
2076         }
2077
2078         /* Prepare the temporary ll2 information */
2079         memset(&ll2_info, 0, sizeof(ll2_info));
2080
2081         ll2_info.conn_type = conn_type;
2082         ll2_info.mtu = params->mtu;
2083         ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
2084         ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
2085         ll2_info.tx_tc = 0;
2086         ll2_info.tx_dest = CORE_TX_DEST_NW;
2087         ll2_info.gsi_enable = gsi_enable;
2088
2089         rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
2090                                         QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
2091                                         &cdev->ll2->handle);
2092         if (rc) {
2093                 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
2094                 goto fail;
2095         }
2096
2097         rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2098                                           cdev->ll2->handle);
2099         if (rc) {
2100                 DP_INFO(cdev, "Failed to establish LL2 connection\n");
2101                 goto release_fail;
2102         }
2103
2104         /* Post all Rx buffers to FW */
2105         spin_lock_bh(&cdev->ll2->lock);
2106         list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
2107                 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2108                                             cdev->ll2->handle,
2109                                             buffer->phys_addr, 0, buffer, 1);
2110                 if (rc) {
2111                         DP_INFO(cdev,
2112                                 "Failed to post an Rx buffer; Deleting it\n");
2113                         dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
2114                                          cdev->ll2->rx_size, DMA_FROM_DEVICE);
2115                         kfree(buffer->data);
2116                         list_del(&buffer->list);
2117                         kfree(buffer);
2118                 } else {
2119                         cdev->ll2->rx_cnt++;
2120                 }
2121         }
2122         spin_unlock_bh(&cdev->ll2->lock);
2123
2124         if (!cdev->ll2->rx_cnt) {
2125                 DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
2126                 goto release_terminate;
2127         }
2128
2129         if (!is_valid_ether_addr(params->ll2_mac_address)) {
2130                 DP_INFO(cdev, "Invalid Ethernet address\n");
2131                 goto release_terminate;
2132         }
2133
2134         if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2135             cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
2136                 DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
2137                 rc = qed_ll2_start_ooo(cdev, params);
2138                 if (rc) {
2139                         DP_INFO(cdev,
2140                                 "Failed to initialize the OOO LL2 queue\n");
2141                         goto release_terminate;
2142                 }
2143         }
2144
2145         p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2146         if (!p_ptt) {
2147                 DP_INFO(cdev, "Failed to acquire PTT\n");
2148                 goto release_terminate;
2149         }
2150
2151         rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2152                                     params->ll2_mac_address);
2153         qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2154         if (rc) {
2155                 DP_ERR(cdev, "Failed to allocate LLH filter\n");
2156                 goto release_terminate_all;
2157         }
2158
2159         ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
2160         return 0;
2161
2162 release_terminate_all:
2163
2164 release_terminate:
2165         qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2166 release_fail:
2167         qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2168 fail:
2169         qed_ll2_kill_buffers(cdev);
2170         cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2171         return -EINVAL;
2172 }
2173
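/* Driver-level stop: remove the MAC filter, stop the OOO queue if one
 * was started, terminate and release the connection, and free the
 * remaining Rx buffers.
 */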
2174 static int qed_ll2_stop(struct qed_dev *cdev)
2175 {
2176         struct qed_ptt *p_ptt;
2177         int rc;
2178
2179         if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
2180                 return 0;
2181
2182         p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2183         if (!p_ptt) {
2184                 DP_INFO(cdev, "Failed to acquire PTT\n");
2185                 goto fail;
2186         }
2187
2188         qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2189                                   cdev->ll2_mac_address);
2190         qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2191         eth_zero_addr(cdev->ll2_mac_address);
2192
2193         if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2194             cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
2195                 qed_ll2_stop_ooo(cdev);
2196
2197         rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2198                                           cdev->ll2->handle);
2199         if (rc)
2200                 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
2201
2202         qed_ll2_kill_buffers(cdev);
2203
2204         qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2205         cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2206
2207         return rc;
2208 fail:
2209         return -EINVAL;
2210 }
2211
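/* Transmit an skb over the LL2 connection. The linear part and each page
 * fragment are DMA-mapped (unless the frags were pre-mapped by the
 * caller) and posted as separate BDs; IP checksum offload is requested
 * for everything except IPv6-in-IPv6, and a VLAN tag is inserted when
 * the skb carries one.
 */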
2212 static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2213 {
2214         const skb_frag_t *frag;
2215         int rc = -EINVAL, i;
2216         dma_addr_t mapping;
2217         u16 vlan = 0;
2218         u8 flags = 0;
2219
2220         if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2221                 DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2222                 return -EINVAL;
2223         }
2224
2225         if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2226                 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2227                        1 + skb_shinfo(skb)->nr_frags);
2228                 return -EINVAL;
2229         }
2230
2231         mapping = dma_map_single(&cdev->pdev->dev, skb->data,
2232                                  skb->len, DMA_TO_DEVICE);
2233         if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2234                 DP_NOTICE(cdev, "SKB mapping failed\n");
2235                 return -EINVAL;
2236         }
2237
2238         /* Request HW to calculate IP csum */
2239         if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2240               ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2241                 flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
2242
2243         if (skb_vlan_tag_present(skb)) {
2244                 vlan = skb_vlan_tag_get(skb);
2245                 flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
2246         }
2247
2248         rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
2249                                        cdev->ll2->handle,
2250                                        1 + skb_shinfo(skb)->nr_frags,
2251                                        vlan, flags, 0, QED_LL2_TX_DEST_NW,
2252                                        0 /* RoCE FLAVOR */,
2253                                        mapping, skb->len, skb, 1);
2254         if (rc)
2255                 goto err;
2256
2257         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2258                 frag = &skb_shinfo(skb)->frags[i];
2259                 if (!cdev->ll2->frags_mapped) {
2260                         mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2261                                                    skb_frag_size(frag),
2262                                                    DMA_TO_DEVICE);
2263
2264                         if (unlikely(dma_mapping_error(&cdev->pdev->dev,
2265                                                        mapping))) {
2266                                 DP_NOTICE(cdev,
2267                                           "Unable to map frag - dropping packet\n");
2268                                 rc = -ENOMEM;
2269                                 goto err;
2270                         }
2271                 } else {
2272                         mapping = page_to_phys(skb_frag_page(frag)) |
2273                             frag->page_offset;
2274                 }
2275
2276                 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2277                                                        cdev->ll2->handle,
2278                                                        mapping,
2279                                                        skb_frag_size(frag));
2280
2281                 /* If this fails there is little we can do: part of the packet
2282                  * is already posted, so memory can't be freed until completion.
2283                  */
2284                 if (rc)
2285                         goto err2;
2286         }
2287
2288         return 0;
2289
2290 err:
2291         dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
2292
2293 err2:
2294         return rc;
2295 }
2296
2297 static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2298 {
2299         if (!cdev->ll2)
2300                 return -EINVAL;
2301
2302         return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2303                                  cdev->ll2->handle, stats);
2304 }
2305
2306 const struct qed_ll2_ops qed_ll2_ops_pass = {
2307         .start = &qed_ll2_start,
2308         .stop = &qed_ll2_stop,
2309         .start_xmit = &qed_ll2_start_xmit,
2310         .register_cb_ops = &qed_ll2_register_cb_ops,
2311         .get_stats = &qed_ll2_stats,
2312 };
2313
2314 int qed_ll2_alloc_if(struct qed_dev *cdev)
2315 {
2316         cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
2317         return cdev->ll2 ? 0 : -ENOMEM;
2318 }
2319
2320 void qed_ll2_dealloc_if(struct qed_dev *cdev)
2321 {
2322         kfree(cdev->ll2);
2323         cdev->ll2 = NULL;
2324 }