/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_puda.h"
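/*
 * PUDA ("privileged UDA") provides the raw send/receive queues used by two
 * internal consumers: the ILQ (iWARP listen queue), which receives connection
 * management packets, and the IEQ (iWARP exception queue), which reassembles
 * partial or out-of-order MPA FPDUs on behalf of user QPs.
 */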
static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
			      struct i40iw_puda_buf *buf);
static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
						      *rsrc, bool initial);
static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
/**
 * i40iw_puda_get_listbuf - get buffer from puda list
 * @list: list to use for buffers (ILQ or IEQ)
 */
static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
{
	struct i40iw_puda_buf *buf = NULL;

	if (!list_empty(list)) {
		buf = (struct i40iw_puda_buf *)list->next;
		list_del((struct list_head *)&buf->list);
	}
	return buf;
}
/**
 * i40iw_puda_get_bufpool - return buffer from resource
 * @rsrc: resource to use for buffer
 */
struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_puda_buf *buf = NULL;
	struct list_head *list = &rsrc->bufpool;
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	buf = i40iw_puda_get_listbuf(list);
	if (buf)
		rsrc->avail_buf_count--;
	else
		rsrc->stats_buf_alloc_fail++;
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);

	return buf;
}
/**
 * i40iw_puda_ret_bufpool - return buffer to rsrc list
 * @rsrc: resource to use for buffer
 * @buf: buffer to return to resource
 */
void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
			    struct i40iw_puda_buf *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	list_add(&buf->list, &rsrc->bufpool);
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
	rsrc->avail_buf_count++;
}
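/*
 * Receive WQEs carry a valid (polarity) bit in the word at offset 24.
 * Reposting a buffer toggles whatever value is there rather than writing a
 * fixed one, so hardware can distinguish a freshly posted WQE from a stale
 * one as the ring wraps.
 */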
/**
 * i40iw_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */
static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
				    struct i40iw_puda_buf *buf, bool initial)
{
	u64 *wqe;
	struct i40iw_sc_qp *qp = &rsrc->qp;
	u64 offset24 = 0;

	qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
		    "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
		    wqe_idx, buf, wqe);
	if (!initial)
		get_64bit_val(wqe, 24, &offset24);

	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
	set_64bit_val(wqe, 24, offset24);

	set_64bit_val(wqe, 0, buf->mem.pa);
	set_64bit_val(wqe, 8,
		      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
	set_64bit_val(wqe, 24, offset24);
}
/**
 * i40iw_puda_replenish_rq - post rcv buffers
 * @rsrc: resource to use for buffer
 * @initial: flag if during init time
 */
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
						      bool initial)
{
	u32 i;
	u32 invalid_cnt = rsrc->rxq_invalid_cnt;
	struct i40iw_puda_buf *buf = NULL;

	for (i = 0; i < invalid_cnt; i++) {
		buf = i40iw_puda_get_bufpool(rsrc);
		if (!buf)
			return I40IW_ERR_list_empty;
		i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
					initial);
		rsrc->rx_wqe_idx =
		    ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
		rsrc->rxq_invalid_cnt--;
	}
	return 0;
}
/**
 * i40iw_puda_alloc_buf - allocate mem for buffer
 * @dev: iwarp device
 * @length: length of buffer
 */
static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
						   u32 length)
{
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_virt_mem buf_mem;
	enum i40iw_status_code ret;

	ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
				      sizeof(struct i40iw_puda_buf));
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error mem for buf\n", __func__);
		return NULL;
	}

	buf = (struct i40iw_puda_buf *)buf_mem.va;
	ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error dma mem for buf\n", __func__);
		i40iw_free_virt_mem(dev->hw, &buf_mem);
		return NULL;
	}

	buf->buf_mem.va = buf_mem.va;
	buf->buf_mem.size = buf_mem.size;

	return buf;
}
/**
 * i40iw_puda_dele_buf - delete buffer back to system
 * @dev: iwarp device
 * @buf: buffer to free
 */
static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
				struct i40iw_puda_buf *buf)
{
	i40iw_free_dma_mem(dev->hw, &buf->mem);
	i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
}
/**
 * i40iw_puda_get_next_send_wqe - return next wqe for processing
 * @qp: puda qp for wqe
 * @wqe_idx: wqe index for caller
 */
static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
	u64 *wqe = NULL;
	enum i40iw_status_code ret_code = 0;

	*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;
	I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
	if (ret_code)
		return wqe;
	wqe = qp->sq_base[*wqe_idx].elem;

	return wqe;
}
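/*
 * CQE ownership is tracked by polarity: a CQE is new only if its valid bit
 * matches the CQ's current polarity, which flips each time the ring wraps,
 * so entries left over from the previous pass never match.
 */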
/**
 * i40iw_puda_poll_info - poll cq for completion
 * @cq: cq for poll
 * @info: info return for successful completion
 */
static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
						   struct i40iw_puda_completion_info *info)
{
	u64 qword0, qword2, qword3;
	u64 *cqe;
	u64 comp_ctx;
	bool valid_bit;
	u32 major_err, minor_err;
	bool error;

	cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
	get_64bit_val(cqe, 24, &qword3);
	valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);

	if (valid_bit != cq->cq_uk.polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
	error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
	if (error) {
		i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
		major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
		minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
		info->compl_error = major_err << 16 | minor_err;
		return I40IW_ERR_CQ_COMPL_ERROR;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

	get_64bit_val(cqe, 8, &comp_ctx);
	info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
	info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);

	if (info->q_type == I40IW_CQE_QTYPE_RQ) {
		info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
		info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
		info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
		info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
	}

	return 0;
}
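/*
 * One poll routine services both ILQ and IEQ CQs: RQ completions are handed
 * to the resource's receive() callback and SQ completions to its
 * xmit_complete() callback, after which the ring and buffer accounting is
 * updated below.
 */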
/**
 * i40iw_puda_poll_completion - processes completion for cq
 * @dev: iwarp device
 * @cq: cq getting interrupt
 * @compl_err: return any completion err
 */
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
						  struct i40iw_sc_cq *cq, u32 *compl_err)
{
	struct i40iw_qp_uk *qp;
	struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
	struct i40iw_puda_completion_info info;
	enum i40iw_status_code ret = 0;
	struct i40iw_puda_buf *buf;
	struct i40iw_puda_rsrc *rsrc;
	void *sqwrid;
	u8 cq_type = cq->cq_type;
	unsigned long flags;

	if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
		rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
		return I40IW_ERR_BAD_PTR;
	}
	memset(&info, 0, sizeof(info));
	ret = i40iw_puda_poll_info(cq, &info);
	*compl_err = info.compl_error;
	if (ret == I40IW_ERR_QUEUE_EMPTY)
		return ret;
	if (ret)
		goto done;

	qp = info.qp;
	if (!qp || !rsrc) {
		ret = I40IW_ERR_BAD_PTR;
		goto done;
	}

	if (qp->qp_id != rsrc->qp_id) {
		ret = I40IW_ERR_BAD_PTR;
		goto done;
	}

	if (info.q_type == I40IW_CQE_QTYPE_RQ) {
		buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
		/* Get all the tcpip information in the buf header */
		ret = i40iw_puda_get_tcpip_info(&info, buf);
		if (ret) {
			rsrc->stats_rcvd_pkt_err++;
			if (cq_type == I40IW_CQ_TYPE_ILQ) {
				i40iw_ilq_putback_rcvbuf(&rsrc->qp,
							 info.wqe_idx);
			} else {
				i40iw_puda_ret_bufpool(rsrc, buf);
				i40iw_puda_replenish_rq(rsrc, false);
			}
			goto done;
		}

		rsrc->stats_pkt_rcvd++;
		rsrc->compl_rxwqe_idx = info.wqe_idx;
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
		rsrc->receive(rsrc->vsi, buf);
		if (cq_type == I40IW_CQ_TYPE_ILQ)
			i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
		else
			i40iw_puda_replenish_rq(rsrc, false);
	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
		sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
		I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
		rsrc->xmit_complete(rsrc->vsi, sqwrid);
		spin_lock_irqsave(&rsrc->bufpool_lock, flags);
		rsrc->tx_wqe_avail_cnt++;
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		if (!list_empty(&rsrc->vsi->ilq->txpend))
			i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
	}

done:
	I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
	if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
		cq_uk->polarity = !cq_uk->polarity;
	/* update cq tail in cq shadow memory also */
	I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
	set_64bit_val(cq_uk->shadow_area, 0,
		      I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
	return 0;
}
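/*
 * A UDA send WQE is four 64-bit words: fragment physical address, fragment
 * length, and two header words describing the packet (MAC/IP/L4 header
 * lengths, opcode, loopback and valid bits). The valid bit is written last,
 * behind a write barrier, so hardware never sees a half-built WQE.
 */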
/**
 * i40iw_puda_send - complete send wqe for transmit
 * @qp: puda qp for send
 * @info: buffer information for transmit
 */
enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
				       struct i40iw_puda_send_info *info)
{
	u64 *wqe;
	u32 iplen, l4len;
	u64 header[2];
	u32 wqe_idx;
	u8 iipt;

	/* number of 32-bit DWORDS in header */
	l4len = info->tcplen >> 2;
	if (info->ipv4) {
		iipt = 3;
		iplen = 5;
	} else {
		iipt = 1;
		iplen = 10;
	}

	wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
	/* Third line of WQE descriptor */
	/* maclen is in words */
	header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
		    LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
		    LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
		    LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
	/* Fourth line of WQE descriptor */
	header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
		    LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
		    LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
		    LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);

	set_64bit_val(wqe, 0, info->paddr);
	set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
	set_64bit_val(wqe, 16, header[0]);

	/* Ensure all data is written before writing valid bit */
	wmb();
	set_64bit_val(wqe, 24, header[1]);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
	i40iw_qp_post_wr(&qp->qp_uk);
	return 0;
}
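/*
 * Transmit flow control: tx_wqe_avail_cnt tracks free SQ WQEs. When none
 * are free, or a backlog already exists, new buffers are parked on txpend
 * and resubmitted from the completion path (which calls this function with
 * buf == NULL).
 */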
/**
 * i40iw_puda_send_buf - transmit puda buffer
 * @rsrc: resource to use for buffer
 * @buf: puda buffer to transmit
 */
void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_send_info info;
	enum i40iw_status_code ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	/* if no wqe available or not from a completion and we have
	 * pending buffers, we must queue new buffer
	 */
	if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
		list_add_tail(&buf->list, &rsrc->txpend);
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		rsrc->stats_sent_pkt_q++;
		if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
			i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
				    "%s: adding to txpend\n", __func__);
		return;
	}
	rsrc->tx_wqe_avail_cnt--;
	/* if we are coming from a completion and have pending buffers
	 * then get one from pending list
	 */
	if (!buf) {
		buf = i40iw_puda_get_listbuf(&rsrc->txpend);
		if (!buf)
			goto done;
	}

	info.scratch = (void *)buf;
	info.paddr = buf->mem.pa;
	info.len = buf->totallen;
	info.tcplen = buf->tcphlen;
	info.maclen = buf->maclen;
	info.ipv4 = buf->ipv4;
	info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);

	ret = i40iw_puda_send(&rsrc->qp, &info);
	if (ret) {
		rsrc->tx_wqe_avail_cnt++;
		rsrc->stats_sent_pkt_q++;
		list_add(&buf->list, &rsrc->txpend);
		if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
			i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
				    "%s: adding to puda_send\n", __func__);
	} else {
		rsrc->stats_pkt_sent++;
	}
done:
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
}
/**
 * i40iw_puda_qp_setctx - during init, set qp's context
 * @rsrc: qp's resource
 */
static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_qp *qp = &rsrc->qp;
	u64 *qp_ctx = qp->hw_host_ctx;

	set_64bit_val(qp_ctx, 8, qp->sq_pa);
	set_64bit_val(qp_ctx, 16, qp->rq_pa);

	set_64bit_val(qp_ctx, 24,
		      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
		      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));

	set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
	set_64bit_val(qp_ctx, 56, 0);
	set_64bit_val(qp_ctx, 64, 1);

	set_64bit_val(qp_ctx, 136,
		      LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
		      LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));

	set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));

	set_64bit_val(qp_ctx, 168,
		      LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));

	set_64bit_val(qp_ctx, 176,
		      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
		      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
		      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));

	i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
			qp_ctx, I40IW_QP_CTX_SIZE);
}
/**
 * i40iw_puda_qp_wqe - setup wqe for qp create
 * @dev: iwarp device
 * @qp: resource for qp
 */
static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	struct i40iw_ccq_cqe_info compl_info;
	enum i40iw_status_code status = 0;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);
	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	set_64bit_val(wqe, 24, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
	i40iw_sc_cqp_post_sq(cqp);
	status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
						    I40IW_CQP_OP_CREATE_QP,
						    &compl_info);
	return status;
}
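/*
 * The QP lives in a single page-aligned DMA allocation laid out as
 * [SQ WQEs][RQ WQEs][shadow area][QP context], which is why the physical
 * addresses below are derived from one another by simple offset arithmetic.
 */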
/**
 * i40iw_puda_qp_create - create qp for resource
 * @rsrc: resource to use for buffer
 */
static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_qp *qp = &rsrc->qp;
	struct i40iw_qp_uk *ukqp = &qp->qp_uk;
	enum i40iw_status_code ret = 0;
	u32 sq_size, rq_size, t_size;
	struct i40iw_dma_mem *mem;

	sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
	rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
	t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
		  I40IW_QP_CTX_SIZE);
	/* Get page aligned memory */
	ret = i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
				     I40IW_HW_PAGE_SIZE);
	if (ret) {
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
		return ret;
	}

	mem = &rsrc->qpmem;
	memset(mem->va, 0, t_size);
	qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
	qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
	qp->pd = &rsrc->sc_pd;
	qp->qp_type = I40IW_QP_TYPE_UDA;
	qp->dev = rsrc->dev;
	qp->back_qp = (void *)rsrc;
	qp->sq_pa = mem->pa;
	qp->rq_pa = qp->sq_pa + sq_size;
	qp->vsi = rsrc->vsi;
	ukqp->sq_base = mem->va;
	ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
	ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
	qp->shadow_area_pa = qp->rq_pa + rq_size;
	qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
	qp->hw_host_ctx_pa =
		qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
	ukqp->qp_id = rsrc->qp_id;
	ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
	ukqp->rq_wrid_array = rsrc->rq_wrid_array;
	ukqp->sq_size = rsrc->sq_size;
	ukqp->rq_size = rsrc->rq_size;

	I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
	I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
	I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);

	if (qp->pd->dev->is_pf)
		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						      I40E_PFPE_WQEALLOC);
	else
		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						      I40E_VFPE_WQEALLOC1);

	qp->user_pri = 0;
	i40iw_qp_add_qos(qp);
	i40iw_puda_qp_setctx(rsrc);
	if (rsrc->ceq_valid)
		ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
	else
		ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
	if (ret)
		i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
	return ret;
}
/**
 * i40iw_puda_cq_wqe - setup wqe for cq create
 * @dev: iwarp device
 * @cq: resource for cq
 */
static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	struct i40iw_ccq_cqe_info compl_info;
	enum i40iw_status_code status = 0;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(cq->shadow_read_threshold,
			    I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, 32, cq->cq_pa);

	set_64bit_val(wqe, 40, cq->shadow_area_pa);

	header = cq->cq_uk.cq_id |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	set_64bit_val(wqe, 24, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_sc_cqp_post_sq(dev->cqp);
	status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
						    I40IW_CQP_OP_CREATE_CQ,
						    &compl_info);
	return status;
}
/**
 * i40iw_puda_cq_create - create cq for resource
 * @rsrc: resource for which cq to create
 */
static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_dev *dev = rsrc->dev;
	struct i40iw_sc_cq *cq = &rsrc->cq;
	enum i40iw_status_code ret = 0;
	u32 tsize, cqsize;
	struct i40iw_dma_mem *mem;
	struct i40iw_cq_init_info info;
	struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;

	cq->vsi = rsrc->vsi;
	cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
	tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
	ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
				     I40IW_CQ0_ALIGNMENT_MASK);
	if (ret)
		return ret;

	mem = &rsrc->cqmem;
	memset(&info, 0, sizeof(info));
	info.dev = dev;
	info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
		    I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
	info.shadow_read_threshold = rsrc->cq_size >> 2;
	info.cq_base_pa = mem->pa;
	info.shadow_area_pa = mem->pa + cqsize;
	init_info->cq_base = mem->va;
	init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
	init_info->cq_size = rsrc->cq_size;
	init_info->cq_id = rsrc->cq_id;
	info.ceqe_mask = true;
	info.ceq_id_valid = true;
	ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
	if (ret)
		goto error;
	if (rsrc->ceq_valid)
		ret = i40iw_cqp_cq_create_cmd(dev, cq);
	else
		ret = i40iw_puda_cq_wqe(dev, cq);
error:
	if (ret)
		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
	return ret;
}
/**
 * i40iw_puda_free_qp - free qp for resource
 * @rsrc: resource for which qp to free
 */
static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
{
	enum i40iw_status_code ret;
	struct i40iw_ccq_cqe_info compl_info;
	struct i40iw_sc_dev *dev = rsrc->dev;

	if (rsrc->ceq_valid) {
		i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
		return;
	}

	ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
			0, false, true, true);
	if (ret)
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s error puda qp destroy wqe\n",
			    __func__);

	if (!ret) {
		ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
				I40IW_CQP_OP_DESTROY_QP,
				&compl_info);
		if (ret)
			i40iw_debug(dev, I40IW_DEBUG_PUDA,
				    "%s error puda qp destroy failed\n",
				    __func__);
	}
}
/**
 * i40iw_puda_free_cq - free cq for resource
 * @rsrc: resource for which cq to free
 */
static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
{
	enum i40iw_status_code ret;
	struct i40iw_ccq_cqe_info compl_info;
	struct i40iw_sc_dev *dev = rsrc->dev;

	if (rsrc->ceq_valid) {
		i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
		return;
	}
	ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);

	if (ret)
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s error ieq cq destroy\n",
			    __func__);

	if (!ret) {
		ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
				I40IW_CQP_OP_DESTROY_CQ,
				&compl_info);
		if (ret)
			i40iw_debug(dev, I40IW_DEBUG_PUDA,
				    "%s error ieq cq destroy done\n",
				    __func__);
	}
}
/**
 * i40iw_puda_dele_resources - delete all resources during close
 * @vsi: pointer to vsi structure
 * @type: type of resource to delete
 * @reset: true if reset chip
 */
void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
			       enum puda_resource_type type,
			       bool reset)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	struct i40iw_puda_rsrc *rsrc;
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_puda_buf *nextbuf = NULL;
	struct i40iw_virt_mem *vmem;

	switch (type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		rsrc = vsi->ilq;
		vmem = &vsi->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		rsrc = vsi->ieq;
		vmem = &vsi->ieq_mem;
		break;
	default:
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
			    __func__, type);
		return;
	}

	switch (rsrc->completion) {
	case PUDA_HASH_CRC_COMPLETE:
		i40iw_free_hash_desc(rsrc->hash_desc);
		/* fall through */
	case PUDA_QP_CREATED:
		if (!reset)
			i40iw_puda_free_qp(rsrc);

		i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
		/* fall through */
	case PUDA_CQ_CREATED:
		if (!reset)
			i40iw_puda_free_cq(rsrc);

		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
		break;
	default:
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
		break;
	}
	/* Free all allocated puda buffers for both tx and rx */
	buf = rsrc->alloclist;
	while (buf) {
		nextbuf = buf->next;
		i40iw_puda_dele_buf(dev, buf);
		buf = nextbuf;
		rsrc->alloc_buf_count--;
	}
	i40iw_free_virt_mem(dev->hw, vmem);
}
/**
 * i40iw_puda_allocbufs - allocate buffers for resource
 * @rsrc: resource for buffer allocation
 * @count: number of buffers to create
 */
static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
						   u32 count)
{
	u32 i;
	struct i40iw_puda_buf *buf;
	struct i40iw_puda_buf *nextbuf;

	for (i = 0; i < count; i++) {
		buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
		if (!buf) {
			rsrc->stats_buf_alloc_fail++;
			return I40IW_ERR_NO_MEMORY;
		}
		i40iw_puda_ret_bufpool(rsrc, buf);
		rsrc->alloc_buf_count++;
		if (!rsrc->alloclist) {
			rsrc->alloclist = buf;
		} else {
			nextbuf = rsrc->alloclist;
			rsrc->alloclist = buf;
			buf->next = nextbuf;
		}
	}
	rsrc->avail_buf_count = rsrc->alloc_buf_count;
	return 0;
}
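/*
 * Illustrative caller sketch for creating a resource (the sizes here are
 * hypothetical, not the driver's actual defaults):
 *
 *	struct i40iw_puda_rsrc_info info = {};
 *
 *	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
 *	info.sq_size = 512;
 *	info.rq_size = 512;
 *	info.buf_size = 2048;
 *	if (i40iw_puda_create_rsrc(vsi, &info))
 *		goto error;
 *
 * On failure, i40iw_puda_create_rsrc() itself tears down whatever was
 * partially created via i40iw_puda_dele_resources().
 */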
/**
 * i40iw_puda_create_rsrc - create resource (ilq or ieq)
 * @vsi: pointer to vsi structure
 * @info: resource information
 */
enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
					      struct i40iw_puda_rsrc_info *info)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	enum i40iw_status_code ret = 0;
	struct i40iw_puda_rsrc *rsrc;
	u32 pudasize;
	u32 sqwridsize, rqwridsize;
	struct i40iw_virt_mem *vmem;

	info->count = 1;
	pudasize = sizeof(struct i40iw_puda_rsrc);
	sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
	rqwridsize = info->rq_size * 8;
	switch (info->type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		vmem = &vsi->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		vmem = &vsi->ieq_mem;
		break;
	default:
		return I40IW_NOT_SUPPORTED;
	}
	ret = i40iw_allocate_virt_mem(dev->hw, vmem,
				      pudasize + sqwridsize + rqwridsize);
	if (ret)
		return ret;
	rsrc = (struct i40iw_puda_rsrc *)vmem->va;
	spin_lock_init(&rsrc->bufpool_lock);
	if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
		vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
		vsi->ilq_count = info->count;
		rsrc->receive = info->receive;
		rsrc->xmit_complete = info->xmit_complete;
	} else {
		vsi->ieq_count = info->count;
		vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
		rsrc->receive = i40iw_ieq_receive;
		rsrc->xmit_complete = i40iw_ieq_tx_compl;
	}

	rsrc->ceq_valid = info->ceq_valid;
	rsrc->type = info->type;
	rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
	rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
	/* Initialize all ieq lists */
	INIT_LIST_HEAD(&rsrc->bufpool);
	INIT_LIST_HEAD(&rsrc->txpend);

	rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
	dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
	rsrc->qp_id = info->qp_id;
	rsrc->cq_id = info->cq_id;
	rsrc->sq_size = info->sq_size;
	rsrc->rq_size = info->rq_size;
	rsrc->cq_size = info->rq_size + info->sq_size;
	rsrc->buf_size = info->buf_size;
	rsrc->dev = dev;
	rsrc->vsi = vsi;

	ret = i40iw_puda_cq_create(rsrc);
	if (!ret) {
		rsrc->completion = PUDA_CQ_CREATED;
		ret = i40iw_puda_qp_create(rsrc);
	}
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
		goto error;
	}
	rsrc->completion = PUDA_QP_CREATED;

	ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n", __func__);
		goto error;
	}

	rsrc->rxq_invalid_cnt = info->rq_size;
	ret = i40iw_puda_replenish_rq(rsrc, true);
	if (ret)
		goto error;

	if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
		if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
			rsrc->check_crc = true;
			rsrc->completion = PUDA_HASH_CRC_COMPLETE;
		}
	}

	dev->ccq_ops->ccq_arm(&rsrc->cq);
	return ret;

error:
	i40iw_puda_dele_resources(vsi, info->type, false);
	return ret;
}
/**
 * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
 * @qp: ilq's qp resource
 * @wqe_idx: wqe index of completed rcvbuf
 */
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
{
	u64 *wqe;
	u64 offset24;

	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	get_64bit_val(wqe, 24, &offset24);
	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
	set_64bit_val(wqe, 24, offset24);
}
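/*
 * I40IW_IEQ_MPA_FRAMING is assumed here to cover the MPA framing bytes
 * around the payload (the MPA length field plus trailing CRC), and the
 * result is rounded up to a 4-byte boundary per the MPA padding rules.
 */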
/**
 * i40iw_ieq_get_fpdu_length - given length return fpdu length
 * @length: length of fpdu
 */
static u16 i40iw_ieq_get_fpdu_length(u16 length)
{
	u16 fpdu_len;

	fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
	fpdu_len = (fpdu_len + 3) & 0xfffffffc;

	return fpdu_len;
}
/**
 * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 * @buf: rcv buffer with partial
 * @txbuf: tx buffer for sending back
 * @buf_offset: rcv buffer offset to copy from
 * @txbuf_offset: at offset in tx buf to copy
 * @length: length of data to copy
 */
static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
				    struct i40iw_puda_buf *txbuf,
				    u16 buf_offset, u32 txbuf_offset,
				    u32 length)
{
	void *mem1 = (u8 *)buf->mem.va + buf_offset;
	void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;

	memcpy(mem2, mem1, length);
}
/**
 * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
 * @buf: receive buffer with partial
 * @txbuf: buffer to prepare
 */
static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
				   struct i40iw_puda_buf *txbuf)
{
	txbuf->maclen = buf->maclen;
	txbuf->tcphlen = buf->tcphlen;
	txbuf->ipv4 = buf->ipv4;
	txbuf->hdrlen = buf->hdrlen;
	i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
}
/**
 * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
 * @buf: receive exception buffer
 * @fps: first partial sequence number
 */
static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
{
	u32 offset;

	if (buf->seqnum < fps) {
		offset = fps - buf->seqnum;
		if (offset > buf->datalen)
			return;
		buf->data += offset;
		buf->datalen -= (u16)offset;
		buf->seqnum = fps;
	}
}
/**
 * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
 * @ieq: ieq resource
 * @rxlist: ieq's received buffer list
 * @pbufl: temporary list for buffers for fpdu
 * @txbuf: tx buffer for fpdu
 * @fpdu_len: total length of fpdu
 */
static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
				  struct list_head *rxlist,
				  struct list_head *pbufl,
				  struct i40iw_puda_buf *txbuf,
				  u16 fpdu_len)
{
	struct i40iw_puda_buf *buf;
	u32 nextseqnum;
	u16 txoffset, bufoffset;

	buf = i40iw_puda_get_listbuf(pbufl);
	if (!buf)
		return;
	nextseqnum = buf->seqnum + fpdu_len;
	txbuf->totallen = buf->hdrlen + fpdu_len;
	txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
	i40iw_ieq_setup_tx_buf(buf, txbuf);

	txoffset = buf->hdrlen;
	bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);

	do {
		if (buf->datalen >= fpdu_len) {
			/* copied full fpdu */
			i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
			buf->datalen -= fpdu_len;
			buf->data += fpdu_len;
			buf->seqnum = nextseqnum;
			break;
		}
		/* copy partial fpdu */
		i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
		txoffset += buf->datalen;
		fpdu_len -= buf->datalen;
		i40iw_puda_ret_bufpool(ieq, buf);
		buf = i40iw_puda_get_listbuf(pbufl);
		if (!buf)
			return;
		bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
	} while (1);

	/* last buffer on the list */
	if (buf->datalen)
		list_add(&buf->list, rxlist);
	else
		i40iw_puda_ret_bufpool(ieq, buf);
}
/**
 * i40iw_ieq_create_pbufl - create buffer list for single fpdu
 * @pfpdu: partial management per user qp
 * @rxlist: resource list for receive ieq buffers
 * @pbufl: temp. list for buffers for fpdu
 * @buf: first receive buffer
 * @fpdu_len: total length of fpdu
 */
static enum i40iw_status_code i40iw_ieq_create_pbufl(
				struct i40iw_pfpdu *pfpdu,
				struct list_head *rxlist,
				struct list_head *pbufl,
				struct i40iw_puda_buf *buf,
				u16 fpdu_len)
{
	enum i40iw_status_code status = 0;
	struct i40iw_puda_buf *nextbuf;
	u32 nextseqnum;
	u16 plen = fpdu_len - buf->datalen;
	bool done = false;

	nextseqnum = buf->seqnum + buf->datalen;
	do {
		nextbuf = i40iw_puda_get_listbuf(rxlist);
		if (!nextbuf) {
			status = I40IW_ERR_list_empty;
			break;
		}
		list_add_tail(&nextbuf->list, pbufl);
		if (nextbuf->seqnum != nextseqnum) {
			pfpdu->bad_seq_num++;
			status = I40IW_ERR_SEQ_NUM;
			break;
		}
		if (nextbuf->datalen >= plen) {
			done = true;
		} else {
			plen -= nextbuf->datalen;
			nextseqnum = nextbuf->seqnum + nextbuf->datalen;
		}
	} while (!done);

	return status;
}
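/*
 * Partial-FPDU path: the receive buffers spanning one FPDU are collected
 * onto a temporary list, copied into a single tx buffer, CRC-checked, and
 * looped back to the owning QP through the IEQ's own send queue. On any
 * failure the collected buffers are returned to the rxlist so no data is
 * lost.
 */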
/**
 * i40iw_ieq_handle_partial - process partial fpdu buffer
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 * @fpdu_len: fpdu len in the buffer
 */
static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
						       struct i40iw_pfpdu *pfpdu,
						       struct i40iw_puda_buf *buf,
						       u16 fpdu_len)
{
	enum i40iw_status_code status = 0;
	u8 *crcptr;
	u32 mpacrc;
	u32 seqnum = buf->seqnum;
	struct list_head pbufl;	/* partial buffer list */
	struct i40iw_puda_buf *txbuf = NULL;
	struct list_head *rxlist = &pfpdu->rxlist;

	INIT_LIST_HEAD(&pbufl);
	list_add(&buf->list, &pbufl);

	status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
	if (status)
		goto error;

	txbuf = i40iw_puda_get_bufpool(ieq);
	if (!txbuf) {
		pfpdu->no_tx_bufs++;
		status = I40IW_ERR_NO_TXBUFS;
		goto error;
	}

	i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
	i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
	crcptr = txbuf->data + fpdu_len - 4;
	mpacrc = *(u32 *)crcptr;
	if (ieq->check_crc) {
		status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
						(fpdu_len - 4), mpacrc);
		if (status) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error bad crc\n", __func__);
			goto error;
		}
	}

	i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
			txbuf->mem.va, txbuf->totallen);
	i40iw_puda_send_buf(ieq, txbuf);
	pfpdu->rcv_nxt = seqnum + fpdu_len;
	return status;

error:
	while (!list_empty(&pbufl)) {
		buf = (struct i40iw_puda_buf *)(pbufl.prev);
		list_del(&buf->list);
		list_add(&buf->list, rxlist);
	}
	if (txbuf)
		i40iw_puda_ret_bufpool(ieq, txbuf);
	return status;
}
/**
 * i40iw_ieq_process_buf - process buffer rcvd for ieq
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 */
static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
						    struct i40iw_pfpdu *pfpdu,
						    struct i40iw_puda_buf *buf)
{
	u16 fpdu_len = 0;
	u16 datalen = buf->datalen;
	u8 *datap = buf->data;
	u8 *crcptr;
	u16 ioffset = 0;
	u32 mpacrc;
	u32 seqnum = buf->seqnum;
	u16 length = 0;
	u16 full = 0;
	bool partial = false;
	struct i40iw_puda_buf *txbuf;
	struct list_head *rxlist = &pfpdu->rxlist;
	enum i40iw_status_code ret = 0;
	enum i40iw_status_code status = 0;

	ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
	while (datalen) {
		fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));
		if (fpdu_len > pfpdu->max_fpdu_data) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error bad fpdu_len\n", __func__);
			status = I40IW_ERR_MPA_CRC;
			list_add(&buf->list, rxlist);
			return status;
		}

		if (datalen < fpdu_len) {
			partial = true;
			break;
		}
		crcptr = datap + fpdu_len - 4;
		mpacrc = *(u32 *)crcptr;
		if (ieq->check_crc)
			ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
						     datap, fpdu_len - 4, mpacrc);
		if (ret) {
			status = I40IW_ERR_MPA_CRC;
			list_add(&buf->list, rxlist);
			return status;
		}
		full++;
		pfpdu->fpdu_processed++;
		datap += fpdu_len;
		length += fpdu_len;
		datalen -= fpdu_len;
	}
	if (full) {
		/* copy full pdu's in the txbuf and send them out */
		txbuf = i40iw_puda_get_bufpool(ieq);
		if (!txbuf) {
			pfpdu->no_tx_bufs++;
			status = I40IW_ERR_NO_TXBUFS;
			list_add(&buf->list, rxlist);
			return status;
		}
		/* modify txbuf's buffer header */
		i40iw_ieq_setup_tx_buf(buf, txbuf);
		/* copy full fpdu's to new buffer */
		i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
					length);
		txbuf->totallen = buf->hdrlen + length;

		i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
		i40iw_puda_send_buf(ieq, txbuf);

		if (!datalen) {
			pfpdu->rcv_nxt = buf->seqnum + length;
			i40iw_puda_ret_bufpool(ieq, buf);
			return status;
		}

		buf->data = datap;
		buf->seqnum = seqnum + length;
		buf->datalen = datalen;
		pfpdu->rcv_nxt = buf->seqnum;
	}
	if (partial)
		status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);

	return status;
}
/**
 * i40iw_ieq_process_fpdus - process fpdu buffers on its list
 * @qp: qp for which partial fpdus
 * @ieq: ieq resource
 */
static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
				    struct i40iw_puda_rsrc *ieq)
{
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	struct list_head *rxlist = &pfpdu->rxlist;
	struct i40iw_puda_buf *buf;
	enum i40iw_status_code status;

	do {
		if (list_empty(rxlist))
			break;
		buf = i40iw_puda_get_listbuf(rxlist);
		if (!buf) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error no buf\n", __func__);
			break;
		}
		if (buf->seqnum != pfpdu->rcv_nxt) {
			/* This could be out of order or missing packet */
			pfpdu->out_of_order++;
			list_add(&buf->list, rxlist);
			break;
		}
		/* keep processing buffers from the head of the list */
		status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
		if (status == I40IW_ERR_MPA_CRC) {
			pfpdu->mpa_crc_err = true;
			while (!list_empty(rxlist)) {
				buf = i40iw_puda_get_listbuf(rxlist);
				i40iw_puda_ret_bufpool(ieq, buf);
			}
			/* create CQP for AE */
			i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
		}
	} while (!status);
}
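/*
 * Exception entry point: hardware reports the receive window in the QP
 * context and the first partial sequence number (fps) in the QP's Q2
 * buffer. Buffers are inserted into the rxlist in sequence-number order,
 * then the in-order prefix is drained by i40iw_ieq_process_fpdus().
 */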
/**
 * i40iw_ieq_handle_exception - handle qp's exception
 * @ieq: ieq resource
 * @qp: qp receiving exception
 * @buf: receive buffer
 */
static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
				       struct i40iw_sc_qp *qp,
				       struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_buf *tmpbuf = NULL;
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
	u32 rcv_wnd = hw_host_ctx[23];
	/* first partial seq # in q2 */
	u32 fps = qp->q2_buf[16];
	struct list_head *rxlist = &pfpdu->rxlist;
	struct list_head *plist;

	pfpdu->total_ieq_bufs++;

	if (pfpdu->mpa_crc_err) {
		pfpdu->crc_err++;
		goto error;
	}
	if (pfpdu->mode && (fps != pfpdu->fps)) {
		/* clean up qp as it is new partial sequence */
		i40iw_ieq_cleanup_qp(ieq, qp);
		i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
			    "%s: restarting new partial\n", __func__);
		pfpdu->mode = false;
	}

	if (!pfpdu->mode) {
		i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
		/* First_Partial_Sequence_Number check */
		pfpdu->rcv_nxt = fps;
		pfpdu->fps = fps;
		pfpdu->mode = true;
		pfpdu->max_fpdu_data = ieq->vsi->mss;
		pfpdu->pmode_count++;
		INIT_LIST_HEAD(rxlist);
		i40iw_ieq_check_first_buf(buf, fps);
	}

	if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
		pfpdu->bad_seq_num++;
		goto error;
	}

	if (!list_empty(rxlist)) {
		tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
		while ((struct list_head *)tmpbuf != rxlist) {
			if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
				break;
			plist = &tmpbuf->list;
			tmpbuf = (struct i40iw_puda_buf *)plist->next;
		}
		/* Insert buf before tmpbuf */
		list_add_tail(&buf->list, &tmpbuf->list);
	} else {
		list_add_tail(&buf->list, rxlist);
	}
	i40iw_ieq_process_fpdus(qp, ieq);
	return;
error:
	i40iw_puda_ret_bufpool(ieq, buf);
}
/**
 * i40iw_ieq_receive - received exception buffer
 * @vsi: pointer to the vsi structure
 * @buf: exception buffer received
 */
static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
			      struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_rsrc *ieq = vsi->ieq;
	struct i40iw_sc_qp *qp = NULL;
	u32 wqe_idx = ieq->compl_rxwqe_idx;

	qp = i40iw_ieq_get_qp(vsi->dev, buf);
	if (!qp) {
		ieq->stats_bad_qp_id++;
		i40iw_puda_ret_bufpool(ieq, buf);
	} else {
		i40iw_ieq_handle_exception(ieq, qp, buf);
	}
	/*
	 * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
	 * on which wqe_idx to start replenish rq
	 */
	if (!ieq->rxq_invalid_cnt)
		ieq->rx_wqe_idx = wqe_idx;
	ieq->rxq_invalid_cnt++;
}
/**
 * i40iw_ieq_tx_compl - put back after sending completed exception buffer
 * @vsi: pointer to the vsi structure
 * @sqwrid: pointer to puda buffer
 */
static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
{
	struct i40iw_puda_rsrc *ieq = vsi->ieq;
	struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;

	i40iw_puda_ret_bufpool(ieq, buf);
	if (!list_empty(&ieq->txpend)) {
		buf = i40iw_puda_get_listbuf(&ieq->txpend);
		i40iw_puda_send_buf(ieq, buf);
	}
}
/**
 * i40iw_ieq_cleanup_qp - qp is being destroyed
 * @ieq: ieq resource
 * @qp: all pending fpdu buffers
 */
static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
{
	struct i40iw_puda_buf *buf;
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	struct list_head *rxlist = &pfpdu->rxlist;

	if (!pfpdu->mode)
		return;
	while (!list_empty(rxlist)) {
		buf = i40iw_puda_get_listbuf(rxlist);
		i40iw_puda_ret_bufpool(ieq, buf);
	}
}