3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2015 Intel Corporation.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
21 * Copyright(c) 2015 Intel Corporation.
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 /* cut down ridiculously long IB macro names */
59 #define OP(x) IB_OPCODE_RC_##x
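/*
 * OP() uses token pasting, so e.g. OP(SEND_ONLY) expands to
 * IB_OPCODE_RC_SEND_ONLY and OP(RDMA_READ_REQUEST) expands to
 * IB_OPCODE_RC_RDMA_READ_REQUEST.
 */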
61 static void rc_timeout(unsigned long arg);
63 static u32 restart_sge(struct hfi1_sge_state *ss, struct hfi1_swqe *wqe, u32 psn, u32 pmtu)
68 len = delta_psn(psn, wqe->psn) * pmtu;
69 ss->sge = wqe->sg_list[0];
70 ss->sg_list = wqe->sg_list + 1;
71 ss->num_sge = wqe->wr.num_sge;
72 ss->total_len = wqe->length;
73 hfi1_skip_sge(ss, len, 0);
74 return wqe->length - len;
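/*
 * restart_sge() rewinds the SGE state to the middle of a work request:
 * the bytes already covered by earlier PSNs (delta_psn(psn, wqe->psn)
 * packets of pmtu bytes each) are skipped, and the length still to be
 * (re)sent is returned. For example, restarting 3 packets into a WQE
 * with a 4096-byte PMTU skips 12288 bytes.
 */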
77 static void start_timer(struct hfi1_qp *qp)
79 qp->s_flags |= HFI1_S_TIMER;
80 qp->s_timer.function = rc_timeout;
81 /* 4.096 usec. * (1 << qp->timeout) */
82 qp->s_timer.expires = jiffies + qp->timeout_jiffies;
83 add_timer(&qp->s_timer);
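/*
 * The retransmit interval follows the IB formula 4.096 usec * 2^timeout,
 * precomputed into qp->timeout_jiffies; e.g. timeout = 14 gives roughly
 * 4.096 us * 16384 ~= 67 ms before rc_timeout() fires.
 */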
87 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
88 * @dev: the device for this QP
89 * @qp: a pointer to the QP
90 * @ohdr: a pointer to the IB header being constructed
93 * Return 1 if constructed; otherwise, return 0.
94 * Note that we are in the responder's side of the QP context.
95 * Note the QP s_lock must be held.
97 static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp,
98 struct hfi1_other_headers *ohdr, u32 pmtu)
100 struct hfi1_ack_entry *e;
107 /* Don't send an ACK if we aren't supposed to. */
108 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK))
111 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
114 switch (qp->s_ack_state) {
115 case OP(RDMA_READ_RESPONSE_LAST):
116 case OP(RDMA_READ_RESPONSE_ONLY):
117 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
118 if (e->rdma_sge.mr) {
119 hfi1_put_mr(e->rdma_sge.mr);
120 e->rdma_sge.mr = NULL;
123 case OP(ATOMIC_ACKNOWLEDGE):
125 * We can increment the tail pointer now that the last
126 * response has been sent instead of only being
129 if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
130 qp->s_tail_ack_queue = 0;
133 case OP(ACKNOWLEDGE):
134 /* Check for no next entry in the queue. */
135 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
136 if (qp->s_flags & HFI1_S_ACK_PENDING)
141 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
142 if (e->opcode == OP(RDMA_READ_REQUEST)) {
144 * If an RDMA read response is being resent and
145 * we haven't seen the duplicate request yet,
146 * then stop sending the remaining responses the
147 * responder has seen until the requester re-sends it.
149 len = e->rdma_sge.sge_length;
150 if (len && !e->rdma_sge.mr) {
151 qp->s_tail_ack_queue = qp->r_head_ack_queue;
154 /* Copy SGE state in case we need to resend */
155 qp->s_rdma_mr = e->rdma_sge.mr;
157 hfi1_get_mr(qp->s_rdma_mr);
158 qp->s_ack_rdma_sge.sge = e->rdma_sge;
159 qp->s_ack_rdma_sge.num_sge = 1;
160 qp->s_cur_sge = &qp->s_ack_rdma_sge;
163 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
165 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
168 ohdr->u.aeth = hfi1_compute_aeth(qp);
170 qp->s_ack_rdma_psn = e->psn;
171 bth2 = mask_psn(qp->s_ack_rdma_psn++);
173 /* COMPARE_SWAP or FETCH_ADD */
174 qp->s_cur_sge = NULL;
176 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
177 ohdr->u.at.aeth = hfi1_compute_aeth(qp);
178 ohdr->u.at.atomic_ack_eth[0] =
179 cpu_to_be32(e->atomic_data >> 32);
180 ohdr->u.at.atomic_ack_eth[1] =
181 cpu_to_be32(e->atomic_data);
182 hwords += sizeof(ohdr->u.at) / sizeof(u32);
183 bth2 = mask_psn(e->psn);
186 bth0 = qp->s_ack_state << 24;
189 case OP(RDMA_READ_RESPONSE_FIRST):
190 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
192 case OP(RDMA_READ_RESPONSE_MIDDLE):
193 qp->s_cur_sge = &qp->s_ack_rdma_sge;
194 qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
196 hfi1_get_mr(qp->s_rdma_mr);
197 len = qp->s_ack_rdma_sge.sge.sge_length;
200 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
202 ohdr->u.aeth = hfi1_compute_aeth(qp);
204 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
205 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
208 bth0 = qp->s_ack_state << 24;
209 bth2 = mask_psn(qp->s_ack_rdma_psn++);
215 * Send a regular ACK.
216 * Set the s_ack_state so we wait until after sending
217 * the ACK before setting s_ack_state to ACKNOWLEDGE
220 qp->s_ack_state = OP(SEND_ONLY);
221 qp->s_flags &= ~HFI1_S_ACK_PENDING;
222 qp->s_cur_sge = NULL;
225 cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
227 HFI1_AETH_CREDIT_SHIFT));
229 ohdr->u.aeth = hfi1_compute_aeth(qp);
232 bth0 = OP(ACKNOWLEDGE) << 24;
233 bth2 = mask_psn(qp->s_ack_psn);
235 qp->s_rdma_ack_cnt++;
236 qp->s_hdrwords = hwords;
237 qp->s_cur_size = len;
238 hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle);
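/*
 * bth0 carries the opcode in bits 31:24 (the pkey and remaining flags
 * are filled in by hfi1_make_ruc_header()), and bth2 carries the 24-bit
 * PSN; bit 31 of bth2 is the ACK-request bit used on the request side.
 */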
242 qp->s_ack_state = OP(ACKNOWLEDGE);
244 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
245 * HFI1_S_RESP_PENDING
248 qp->s_flags &= ~(HFI1_S_RESP_PENDING
255 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
256 * @qp: a pointer to the QP
258 * Return 1 if constructed; otherwise, return 0.
260 int hfi1_make_rc_req(struct hfi1_qp *qp)
262 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
263 struct hfi1_other_headers *ohdr;
264 struct hfi1_sge_state *ss;
265 struct hfi1_swqe *wqe;
266 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
278 ohdr = &qp->s_hdr->ibh.u.oth;
279 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
280 ohdr = &qp->s_hdr->ibh.u.l.oth;
283 * The lock is needed to synchronize between the sending tasklet,
284 * the receive interrupt handler, and timeout re-sends.
286 spin_lock_irqsave(&qp->s_lock, flags);
288 /* Sending responses has higher priority over sending requests. */
289 if ((qp->s_flags & HFI1_S_RESP_PENDING) &&
290 make_rc_ack(dev, qp, ohdr, pmtu))
293 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_SEND_OK)) {
294 if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND))
296 /* We are in the error state, flush the work request. */
297 if (qp->s_last == qp->s_head)
299 /* If DMAs are in progress, we can't flush immediately. */
300 if (atomic_read(&qp->s_iowait.sdma_busy)) {
301 qp->s_flags |= HFI1_S_WAIT_DMA;
305 wqe = get_swqe_ptr(qp, qp->s_last);
306 hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
307 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
308 /* will get called again */
312 if (qp->s_flags & (HFI1_S_WAIT_RNR | HFI1_S_WAIT_ACK))
315 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
316 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
317 qp->s_flags |= HFI1_S_WAIT_PSN;
320 qp->s_sending_psn = qp->s_psn;
321 qp->s_sending_hpsn = qp->s_psn - 1;
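/*
 * s_sending_psn..s_sending_hpsn brackets packets already handed to the
 * send engine but not yet completed. If s_psn would re-enter that window
 * (a retransmit), HFI1_S_WAIT_PSN stalls request building until
 * hfi1_rc_send_complete() has drained the window.
 */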
324 /* Send a request. */
325 wqe = get_swqe_ptr(qp, qp->s_cur);
326 switch (qp->s_state) {
328 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK))
331 * Resend an old request or start a new one.
333 * We keep track of the current SWQE so that
334 * we don't reset the "furthest progress" state
335 * if we need to back up.
338 if (qp->s_cur == qp->s_tail) {
339 /* Check if send work queue is empty. */
340 if (qp->s_tail == qp->s_head) {
345 * If a fence is requested, wait for previous
346 * RDMA read and atomic operations to finish.
348 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
349 qp->s_num_rd_atomic) {
350 qp->s_flags |= HFI1_S_WAIT_FENCE;
353 wqe->psn = qp->s_next_psn;
357 * Note that we have to be careful not to modify the
358 * original work request since we may need to resend it.
363 bth2 = mask_psn(qp->s_psn);
364 switch (wqe->wr.opcode) {
366 case IB_WR_SEND_WITH_IMM:
367 /* If no credit, return. */
368 if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) &&
369 cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
370 qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT;
373 wqe->lpsn = wqe->psn;
375 wqe->lpsn += (len - 1) / pmtu;
376 qp->s_state = OP(SEND_FIRST);
380 if (wqe->wr.opcode == IB_WR_SEND)
381 qp->s_state = OP(SEND_ONLY);
383 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
384 /* Immediate data comes after the BTH */
385 ohdr->u.imm_data = wqe->wr.ex.imm_data;
388 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
389 bth0 |= IB_BTH_SOLICITED;
390 bth2 |= IB_BTH_REQ_ACK;
391 if (++qp->s_cur == qp->s_size)
395 case IB_WR_RDMA_WRITE:
396 if (newreq && !(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
399 case IB_WR_RDMA_WRITE_WITH_IMM:
400 /* If no credit, return. */
401 if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) &&
402 cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
403 qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT;
406 ohdr->u.rc.reth.vaddr =
407 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
408 ohdr->u.rc.reth.rkey =
409 cpu_to_be32(wqe->wr.wr.rdma.rkey);
410 ohdr->u.rc.reth.length = cpu_to_be32(len);
411 hwords += sizeof(struct ib_reth) / sizeof(u32);
412 wqe->lpsn = wqe->psn;
414 wqe->lpsn += (len - 1) / pmtu;
415 qp->s_state = OP(RDMA_WRITE_FIRST);
419 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
420 qp->s_state = OP(RDMA_WRITE_ONLY);
423 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
424 /* Immediate data comes after RETH */
425 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
427 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
428 bth0 |= IB_BTH_SOLICITED;
430 bth2 |= IB_BTH_REQ_ACK;
431 if (++qp->s_cur == qp->s_size)
435 case IB_WR_RDMA_READ:
437 * Don't allow more operations to be started
438 * than the QP limits allow.
441 if (qp->s_num_rd_atomic >=
442 qp->s_max_rd_atomic) {
443 qp->s_flags |= HFI1_S_WAIT_RDMAR;
446 qp->s_num_rd_atomic++;
447 if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
450 * Adjust s_next_psn to count the
451 * expected number of responses.
454 qp->s_next_psn += (len - 1) / pmtu;
455 wqe->lpsn = qp->s_next_psn++;
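/*
 * An RDMA read consumes one PSN per expected response packet. For
 * example, len = 10000 with pmtu = 4096 gives (len - 1) / pmtu = 2, so
 * the read spans three PSNs (psn .. psn + 2 = lpsn) and s_next_psn ends
 * up at psn + 3.
 */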
457 ohdr->u.rc.reth.vaddr =
458 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
459 ohdr->u.rc.reth.rkey =
460 cpu_to_be32(wqe->wr.wr.rdma.rkey);
461 ohdr->u.rc.reth.length = cpu_to_be32(len);
462 qp->s_state = OP(RDMA_READ_REQUEST);
463 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
466 bth2 |= IB_BTH_REQ_ACK;
467 if (++qp->s_cur == qp->s_size)
471 case IB_WR_ATOMIC_CMP_AND_SWP:
472 case IB_WR_ATOMIC_FETCH_AND_ADD:
474 * Don't allow more operations to be started
475 * than the QP limits allow.
478 if (qp->s_num_rd_atomic >=
479 qp->s_max_rd_atomic) {
480 qp->s_flags |= HFI1_S_WAIT_RDMAR;
483 qp->s_num_rd_atomic++;
484 if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
486 wqe->lpsn = wqe->psn;
488 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
489 qp->s_state = OP(COMPARE_SWAP);
490 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
491 wqe->wr.wr.atomic.swap);
492 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
493 wqe->wr.wr.atomic.compare_add);
495 qp->s_state = OP(FETCH_ADD);
496 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
497 wqe->wr.wr.atomic.compare_add);
498 ohdr->u.atomic_eth.compare_data = 0;
500 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
501 wqe->wr.wr.atomic.remote_addr >> 32);
502 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
503 wqe->wr.wr.atomic.remote_addr);
504 ohdr->u.atomic_eth.rkey = cpu_to_be32(
505 wqe->wr.wr.atomic.rkey);
506 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
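/*
 * COMPARE_SWAP and FETCH_ADD share the AtomicETH layout: the 64-bit
 * remote address split into two big-endian words, the rkey, and the
 * swap/compare operands. For FETCH_ADD the add value travels in
 * swap_data with compare_data forced to zero; the responder returns the
 * original 64-bit value in the AtomicAckETH.
 */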
509 bth2 |= IB_BTH_REQ_ACK;
510 if (++qp->s_cur == qp->s_size)
517 qp->s_sge.sge = wqe->sg_list[0];
518 qp->s_sge.sg_list = wqe->sg_list + 1;
519 qp->s_sge.num_sge = wqe->wr.num_sge;
520 qp->s_sge.total_len = wqe->length;
521 qp->s_len = wqe->length;
524 if (qp->s_tail >= qp->s_size)
527 if (wqe->wr.opcode == IB_WR_RDMA_READ)
528 qp->s_psn = wqe->lpsn + 1;
531 if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
532 qp->s_next_psn = qp->s_psn;
536 case OP(RDMA_READ_RESPONSE_FIRST):
538 * qp->s_state is normally set to the opcode of the
539 * last packet constructed for new requests and therefore
540 * is never set to RDMA read response.
541 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
542 * thread to indicate a SEND needs to be restarted from an
543 * earlier PSN without interfering with the sending thread.
546 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
549 qp->s_state = OP(SEND_MIDDLE);
551 case OP(SEND_MIDDLE):
552 bth2 = mask_psn(qp->s_psn++);
553 if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
554 qp->s_next_psn = qp->s_psn;
559 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
562 if (wqe->wr.opcode == IB_WR_SEND)
563 qp->s_state = OP(SEND_LAST);
565 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
566 /* Immediate data comes after the BTH */
567 ohdr->u.imm_data = wqe->wr.ex.imm_data;
570 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
571 bth0 |= IB_BTH_SOLICITED;
572 bth2 |= IB_BTH_REQ_ACK;
574 if (qp->s_cur >= qp->s_size)
578 case OP(RDMA_READ_RESPONSE_LAST):
580 * qp->s_state is normally set to the opcode of the
581 * last packet constructed for new requests and therefore
582 * is never set to RDMA read response.
583 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
584 * thread to indicate an RDMA write needs to be restarted from
585 * an earlier PSN without interfering with the sending thread.
588 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
590 case OP(RDMA_WRITE_FIRST):
591 qp->s_state = OP(RDMA_WRITE_MIDDLE);
593 case OP(RDMA_WRITE_MIDDLE):
594 bth2 = mask_psn(qp->s_psn++);
595 if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
596 qp->s_next_psn = qp->s_psn;
601 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
604 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
605 qp->s_state = OP(RDMA_WRITE_LAST);
607 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
608 /* Immediate data comes after the BTH */
609 ohdr->u.imm_data = wqe->wr.ex.imm_data;
611 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
612 bth0 |= IB_BTH_SOLICITED;
614 bth2 |= IB_BTH_REQ_ACK;
616 if (qp->s_cur >= qp->s_size)
620 case OP(RDMA_READ_RESPONSE_MIDDLE):
622 * qp->s_state is normally set to the opcode of the
623 * last packet constructed for new requests and therefore
624 * is never set to RDMA read response.
625 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
626 * thread to indicate an RDMA read needs to be restarted from
627 * an earlier PSN without interfering with the sending thread.
630 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
631 ohdr->u.rc.reth.vaddr =
632 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
633 ohdr->u.rc.reth.rkey =
634 cpu_to_be32(wqe->wr.wr.rdma.rkey);
635 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
636 qp->s_state = OP(RDMA_READ_REQUEST);
637 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
638 bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
639 qp->s_psn = wqe->lpsn + 1;
643 if (qp->s_cur == qp->s_size)
647 qp->s_sending_hpsn = bth2;
648 delta = delta_psn(bth2, wqe->psn);
649 if (delta && delta % HFI1_PSN_CREDIT == 0)
650 bth2 |= IB_BTH_REQ_ACK;
651 if (qp->s_flags & HFI1_S_SEND_ONE) {
652 qp->s_flags &= ~HFI1_S_SEND_ONE;
653 qp->s_flags |= HFI1_S_WAIT_ACK;
654 bth2 |= IB_BTH_REQ_ACK;
657 qp->s_hdrwords = hwords;
659 qp->s_cur_size = len;
660 hfi1_make_ruc_header(
663 bth0 | (qp->s_state << 24),
671 qp->s_flags &= ~HFI1_S_BUSY;
673 spin_unlock_irqrestore(&qp->s_lock, flags);
678 * hfi1_send_rc_ack - Construct an ACK packet and send it
679 * @qp: a pointer to the QP
681 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
682 * Note that RDMA reads and atomics are handled in the
683 * send side QP state and tasklet.
685 void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct hfi1_qp *qp,
688 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
689 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
690 u64 pbc, pbc_flags = 0;
696 struct send_context *sc;
697 struct pio_buf *pbuf;
698 struct hfi1_ib_header hdr;
699 struct hfi1_other_headers *ohdr;
701 /* Don't send ACK or NAK if an RDMA read or atomic is pending. */
702 if (qp->s_flags & HFI1_S_RESP_PENDING)
705 /* Ensure s_rdma_ack_cnt changes are committed */
706 smp_read_barrier_depends();
707 if (qp->s_rdma_ack_cnt)
710 /* Construct the header */
711 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
713 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
714 hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
715 &qp->remote_ah_attr.grh, hwords, 0);
722 /* read pkey_index w/o lock (it's atomic) */
723 bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
724 if (qp->s_mig_state == IB_MIG_MIGRATED)
725 bth0 |= IB_BTH_MIG_REQ;
727 ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
729 HFI1_AETH_CREDIT_SHIFT));
731 ohdr->u.aeth = hfi1_compute_aeth(qp);
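/*
 * The AETH is a single 32-bit word: the low 24 bits carry the
 * responder's MSN and the upper bits the syndrome, either the NAK/RNR
 * code from s_nak_state or the credit count that hfi1_compute_aeth()
 * derives from the receive queue.
 */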
732 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
733 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
734 pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
735 lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
736 hdr.lrh[0] = cpu_to_be16(lrh0);
737 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
738 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
739 hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
740 ohdr->bth[0] = cpu_to_be32(bth0);
741 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
742 ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
743 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
745 /* Don't try to send ACKs if the link isn't ACTIVE */
746 if (driver_lstate(ppd) != IB_PORT_ACTIVE)
750 plen = 2 /* PBC */ + hwords;
751 vl = sc_to_vlt(ppd->dd, sc5);
752 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
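/*
 * plen is what the PIO engine sees: two dwords of PBC followed by the
 * header dwords; an explicit ACK carries no payload. The VL is looked up
 * from the 5-bit SC, and SC[4] was already folded into the PBC via
 * PBC_DC_INFO above.
 */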
754 pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
757 * We have no room to send at the moment. Pass
758 * responsibility for sending the ACK to the send tasklet
759 * so that when enough buffer space becomes available,
760 * the ACK is sent ahead of other outgoing packets.
765 trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);
767 /* write the pbc and data */
768 ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);
773 this_cpu_inc(*ibp->rc_qacks);
774 spin_lock(&qp->s_lock);
775 qp->s_flags |= HFI1_S_ACK_PENDING | HFI1_S_RESP_PENDING;
776 qp->s_nak_state = qp->r_nak_state;
777 qp->s_ack_psn = qp->r_ack_psn;
779 qp->s_flags |= HFI1_S_ECN;
781 /* Schedule the send tasklet. */
782 hfi1_schedule_send(qp);
783 spin_unlock(&qp->s_lock);
787 * reset_psn - reset the QP state to send starting from PSN
789 * @psn: the packet sequence number to restart at
791 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
793 * Called at interrupt level with the QP s_lock held.
795 static void reset_psn(struct hfi1_qp *qp, u32 psn)
798 struct hfi1_swqe *wqe = get_swqe_ptr(qp, n);
804 * If we are starting the request from the beginning,
805 * let the normal send code handle initialization.
807 if (cmp_psn(psn, wqe->psn) <= 0) {
808 qp->s_state = OP(SEND_LAST);
812 /* Find the work request opcode corresponding to the given PSN. */
813 opcode = wqe->wr.opcode;
817 if (++n == qp->s_size)
821 wqe = get_swqe_ptr(qp, n);
822 diff = cmp_psn(psn, wqe->psn);
827 * If we are starting the request from the beginning,
828 * let the normal send code handle initialization.
831 qp->s_state = OP(SEND_LAST);
834 opcode = wqe->wr.opcode;
838 * Set the state to restart in the middle of a request.
839 * Don't change the s_sge, s_cur_sge, or s_cur_size.
840 * See hfi1_make_rc_req().
844 case IB_WR_SEND_WITH_IMM:
845 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
848 case IB_WR_RDMA_WRITE:
849 case IB_WR_RDMA_WRITE_WITH_IMM:
850 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
853 case IB_WR_RDMA_READ:
854 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
859 * This case shouldn't happen since it's only
862 qp->s_state = OP(SEND_LAST);
867 * Set HFI1_S_WAIT_PSN as rc_complete() may start the timer
868 * asynchronously before the send tasklet can get scheduled.
869 * Doing it in hfi1_make_rc_req() is too late.
871 if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
872 (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
873 qp->s_flags |= HFI1_S_WAIT_PSN;
874 qp->s_flags &= ~HFI1_S_AHG_VALID;
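/*
 * The RDMA_READ_RESPONSE_* opcodes are never generated by the request
 * builder for new work, so reset_psn() reuses them as restart markers:
 * FIRST resumes a SEND, LAST resumes an RDMA write, and MIDDLE resumes
 * an RDMA read, matching the corresponding cases in hfi1_make_rc_req().
 */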
878 * Back up requester to resend the last un-ACKed request.
879 * The QP r_lock and s_lock should be held and interrupts disabled.
881 static void restart_rc(struct hfi1_qp *qp, u32 psn, int wait)
883 struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
884 struct hfi1_ibport *ibp;
886 if (qp->s_retry == 0) {
887 if (qp->s_mig_state == IB_MIG_ARMED) {
889 qp->s_retry = qp->s_retry_cnt;
890 } else if (qp->s_last == qp->s_acked) {
891 hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
892 hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
894 } else /* need to handle delayed completion */
899 ibp = to_iport(qp->ibqp.device, qp->port_num);
900 if (wqe->wr.opcode == IB_WR_RDMA_READ)
903 ibp->n_rc_resends += delta_psn(qp->s_psn, psn);
905 qp->s_flags &= ~(HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR |
906 HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_PSN |
909 qp->s_flags |= HFI1_S_SEND_ONE;
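/*
 * HFI1_S_SEND_ONE makes the next pass through hfi1_make_rc_req() emit a
 * single packet with the ACK-request bit set and then sit in
 * HFI1_S_WAIT_ACK, so a retransmit probes the path instead of re-sending
 * the entire outstanding window at once.
 */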
914 * This is called from s_timer for missing responses.
916 static void rc_timeout(unsigned long arg)
918 struct hfi1_qp *qp = (struct hfi1_qp *)arg;
919 struct hfi1_ibport *ibp;
922 spin_lock_irqsave(&qp->r_lock, flags);
923 spin_lock(&qp->s_lock);
924 if (qp->s_flags & HFI1_S_TIMER) {
925 ibp = to_iport(qp->ibqp.device, qp->port_num);
926 ibp->n_rc_timeouts++;
927 qp->s_flags &= ~HFI1_S_TIMER;
928 del_timer(&qp->s_timer);
929 restart_rc(qp, qp->s_last_psn + 1, 1);
930 hfi1_schedule_send(qp);
932 spin_unlock(&qp->s_lock);
933 spin_unlock_irqrestore(&qp->r_lock, flags);
937 * This is called from s_timer for RNR timeouts.
939 void hfi1_rc_rnr_retry(unsigned long arg)
941 struct hfi1_qp *qp = (struct hfi1_qp *)arg;
944 spin_lock_irqsave(&qp->s_lock, flags);
945 if (qp->s_flags & HFI1_S_WAIT_RNR) {
946 qp->s_flags &= ~HFI1_S_WAIT_RNR;
947 del_timer(&qp->s_timer);
948 hfi1_schedule_send(qp);
950 spin_unlock_irqrestore(&qp->s_lock, flags);
954 * Set qp->s_sending_psn to the next PSN after the given one.
955 * This would be psn+1 except when RDMA reads are present.
957 static void reset_sending_psn(struct hfi1_qp *qp, u32 psn)
959 struct hfi1_swqe *wqe;
962 /* Find the work request corresponding to the given PSN. */
964 wqe = get_swqe_ptr(qp, n);
965 if (cmp_psn(psn, wqe->lpsn) <= 0) {
966 if (wqe->wr.opcode == IB_WR_RDMA_READ)
967 qp->s_sending_psn = wqe->lpsn + 1;
969 qp->s_sending_psn = psn + 1;
972 if (++n == qp->s_size)
980 * This should be called with the QP s_lock held and interrupts disabled.
982 void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr)
984 struct hfi1_other_headers *ohdr;
985 struct hfi1_swqe *wqe;
991 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_OR_FLUSH_SEND))
994 /* Find out where the BTH is */
995 if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
998 ohdr = &hdr->u.l.oth;
1000 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
1001 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1002 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1003 WARN_ON(!qp->s_rdma_ack_cnt);
1004 qp->s_rdma_ack_cnt--;
1008 psn = be32_to_cpu(ohdr->bth[2]);
1009 reset_sending_psn(qp, psn);
1012 * Start timer after a packet requesting an ACK has been sent and
1013 * there are still requests that haven't been acked.
1015 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
1017 (HFI1_S_TIMER | HFI1_S_WAIT_RNR | HFI1_S_WAIT_PSN)) &&
1018 (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK))
1021 while (qp->s_last != qp->s_acked) {
1022 wqe = get_swqe_ptr(qp, qp->s_last);
1023 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1024 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1026 for (i = 0; i < wqe->wr.num_sge; i++) {
1027 struct hfi1_sge *sge = &wqe->sg_list[i];
1029 hfi1_put_mr(sge->mr);
1031 /* Post a send completion queue entry if requested. */
1032 if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
1033 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1034 memset(&wc, 0, sizeof(wc));
1035 wc.wr_id = wqe->wr.wr_id;
1036 wc.status = IB_WC_SUCCESS;
1037 wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
1038 wc.byte_len = wqe->length;
1040 hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
1042 if (++qp->s_last >= qp->s_size)
1046 * If we were waiting for sends to complete before re-sending,
1047 * and they are now complete, restart sending.
1049 trace_hfi1_rc_sendcomplete(qp, psn);
1050 if (qp->s_flags & HFI1_S_WAIT_PSN &&
1051 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1052 qp->s_flags &= ~HFI1_S_WAIT_PSN;
1053 qp->s_sending_psn = qp->s_psn;
1054 qp->s_sending_hpsn = qp->s_psn - 1;
1055 hfi1_schedule_send(qp);
1059 static inline void update_last_psn(struct hfi1_qp *qp, u32 psn)
1061 qp->s_last_psn = psn;
1065 * Generate a SWQE completion.
1066 * This is similar to hfi1_send_complete but has to check to be sure
1067 * that the SGEs are not being referenced if the SWQE is being resent.
1069 static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp,
1070 struct hfi1_swqe *wqe,
1071 struct hfi1_ibport *ibp)
1077 * Don't decrement refcount and don't generate a
1078 * completion if the SWQE is being resent until the send
1081 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
1082 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1083 for (i = 0; i < wqe->wr.num_sge; i++) {
1084 struct hfi1_sge *sge = &wqe->sg_list[i];
1086 hfi1_put_mr(sge->mr);
1088 /* Post a send completion queue entry if requested. */
1089 if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
1090 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1091 memset(&wc, 0, sizeof(wc));
1092 wc.wr_id = wqe->wr.wr_id;
1093 wc.status = IB_WC_SUCCESS;
1094 wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
1095 wc.byte_len = wqe->length;
1097 hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
1099 if (++qp->s_last >= qp->s_size)
1102 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1104 this_cpu_inc(*ibp->rc_delayed_comp);
1106 * If send progress is not running, schedule the SDMA engine to make progress.
1109 if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
1110 struct sdma_engine *engine;
1113 /* For now use sc to find engine */
1114 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
1115 engine = qp_to_sdma_engine(qp, sc5);
1116 sdma_engine_progress_schedule(engine);
1120 qp->s_retry = qp->s_retry_cnt;
1121 update_last_psn(qp, wqe->lpsn);
1124 * If we are completing a request which is in the process of
1125 * being resent, we can stop re-sending it since we know the
1126 * responder has already seen it.
1128 if (qp->s_acked == qp->s_cur) {
1129 if (++qp->s_cur >= qp->s_size)
1131 qp->s_acked = qp->s_cur;
1132 wqe = get_swqe_ptr(qp, qp->s_cur);
1133 if (qp->s_acked != qp->s_tail) {
1134 qp->s_state = OP(SEND_LAST);
1135 qp->s_psn = wqe->psn;
1138 if (++qp->s_acked >= qp->s_size)
1140 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1142 wqe = get_swqe_ptr(qp, qp->s_acked);
1148 * do_rc_ack - process an incoming RC ACK
1149 * @qp: the QP the ACK came in on
1150 * @psn: the packet sequence number of the ACK
1151 * @opcode: the opcode of the request that resulted in the ACK
1153 * This is called from rc_rcv_resp() to process an incoming RC ACK
1155 * Called at interrupt level with the QP s_lock held.
1156 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
1158 static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
1159 u64 val, struct hfi1_ctxtdata *rcd)
1161 struct hfi1_ibport *ibp;
1162 enum ib_wc_status status;
1163 struct hfi1_swqe *wqe;
1168 /* Remove QP from retry timer */
1169 if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
1170 qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
1171 del_timer(&qp->s_timer);
1175 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
1176 * requests and implicitly NAK RDMA read and atomic requests issued
1177 * before the NAK'ed request. The MSN won't include the NAK'ed
1178 * request but will include an ACK'ed request(s).
1183 wqe = get_swqe_ptr(qp, qp->s_acked);
1184 ibp = to_iport(qp->ibqp.device, qp->port_num);
1187 * The MSN might be for a later WQE than the PSN indicates so
1188 * only complete WQEs that the PSN finishes.
1190 while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
1192 * RDMA_READ_RESPONSE_ONLY is a special case since
1193 * we want to generate completion events for everything
1194 * before the RDMA read, copy the data, then generate
1195 * the completion for the read.
1197 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
1198 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
1204 * If this request is an RDMA read or atomic, and the ACK is
1205 * for a later operation, this ACK NAKs the RDMA read or
1206 * atomic. In other words, only an RDMA_READ_LAST or ONLY
1207 * can ACK an RDMA read and likewise for atomic ops. Note
1208 * that the NAK case can only happen if relaxed ordering is
1209 * used and requests are sent after an RDMA read or atomic
1210 * is sent but before the response is received.
1212 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
1213 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
1214 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1215 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
1216 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
1217 /* Retry this request. */
1218 if (!(qp->r_flags & HFI1_R_RDMAR_SEQ)) {
1219 qp->r_flags |= HFI1_R_RDMAR_SEQ;
1220 restart_rc(qp, qp->s_last_psn + 1, 0);
1221 if (list_empty(&qp->rspwait)) {
1222 qp->r_flags |= HFI1_R_RSP_SEND;
1223 atomic_inc(&qp->refcount);
1224 list_add_tail(&qp->rspwait,
1225 &rcd->qp_wait_list);
1229 * No need to process the ACK/NAK since we are
1230 * restarting an earlier request.
1234 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1235 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1236 u64 *vaddr = wqe->sg_list[0].vaddr;
1239 if (qp->s_num_rd_atomic &&
1240 (wqe->wr.opcode == IB_WR_RDMA_READ ||
1241 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1242 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
1243 qp->s_num_rd_atomic--;
1244 /* Restart sending task if fence is complete */
1245 if ((qp->s_flags & HFI1_S_WAIT_FENCE) &&
1246 !qp->s_num_rd_atomic) {
1247 qp->s_flags &= ~(HFI1_S_WAIT_FENCE |
1249 hfi1_schedule_send(qp);
1250 } else if (qp->s_flags & HFI1_S_WAIT_RDMAR) {
1251 qp->s_flags &= ~(HFI1_S_WAIT_RDMAR |
1253 hfi1_schedule_send(qp);
1256 wqe = do_rc_completion(qp, wqe, ibp);
1257 if (qp->s_acked == qp->s_tail)
1261 switch (aeth >> 29) {
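/*
 * The top three AETH bits select the response type: 0 is an ACK,
 * 1 an RNR NAK, 3 a NAK; 2 is reserved.
 */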
1263 this_cpu_inc(*ibp->rc_acks);
1264 if (qp->s_acked != qp->s_tail) {
1266 * We are expecting more ACKs so
1267 * reset the re-transmit timer.
1271 * We can stop re-sending the earlier packets and
1272 * continue with the next packet the receiver wants.
1274 if (cmp_psn(qp->s_psn, psn) <= 0)
1275 reset_psn(qp, psn + 1);
1276 } else if (cmp_psn(qp->s_psn, psn) <= 0) {
1277 qp->s_state = OP(SEND_LAST);
1278 qp->s_psn = psn + 1;
1280 if (qp->s_flags & HFI1_S_WAIT_ACK) {
1281 qp->s_flags &= ~HFI1_S_WAIT_ACK;
1282 hfi1_schedule_send(qp);
1284 hfi1_get_credit(qp, aeth);
1285 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1286 qp->s_retry = qp->s_retry_cnt;
1287 update_last_psn(qp, psn);
1291 case 1: /* RNR NAK */
1293 if (qp->s_acked == qp->s_tail)
1295 if (qp->s_flags & HFI1_S_WAIT_RNR)
1297 if (qp->s_rnr_retry == 0) {
1298 status = IB_WC_RNR_RETRY_EXC_ERR;
1301 if (qp->s_rnr_retry_cnt < 7)
1304 /* The last valid PSN is the previous PSN. */
1305 update_last_psn(qp, psn - 1);
1307 ibp->n_rc_resends += delta_psn(qp->s_psn, psn);
1311 qp->s_flags &= ~(HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_ACK);
1312 qp->s_flags |= HFI1_S_WAIT_RNR;
1313 qp->s_timer.function = hfi1_rc_rnr_retry;
1314 qp->s_timer.expires = jiffies + usecs_to_jiffies(
1315 ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
1316 HFI1_AETH_CREDIT_MASK]);
1317 add_timer(&qp->s_timer);
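/*
 * The low AETH syndrome bits index ib_hfi1_rnr_table[], giving the
 * remote's requested back-off in microseconds. The same s_timer is
 * reused here with hfi1_rc_rnr_retry() as the handler instead of
 * rc_timeout().
 */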
1321 if (qp->s_acked == qp->s_tail)
1323 /* The last valid PSN is the previous PSN. */
1324 update_last_psn(qp, psn - 1);
1325 switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
1326 HFI1_AETH_CREDIT_MASK) {
1327 case 0: /* PSN sequence error */
1330 * Back up to the responder's expected PSN.
1331 * Note that we might get a NAK in the middle of an
1332 * RDMA READ response which terminates the RDMA READ.
1335 restart_rc(qp, psn, 0);
1336 hfi1_schedule_send(qp);
1339 case 1: /* Invalid Request */
1340 status = IB_WC_REM_INV_REQ_ERR;
1341 ibp->n_other_naks++;
1344 case 2: /* Remote Access Error */
1345 status = IB_WC_REM_ACCESS_ERR;
1346 ibp->n_other_naks++;
1349 case 3: /* Remote Operation Error */
1350 status = IB_WC_REM_OP_ERR;
1351 ibp->n_other_naks++;
1353 if (qp->s_last == qp->s_acked) {
1354 hfi1_send_complete(qp, wqe, status);
1355 hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1360 /* Ignore other reserved NAK error codes */
1363 qp->s_retry = qp->s_retry_cnt;
1364 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1367 default: /* 2: reserved */
1369 /* Ignore reserved NAK codes. */
1378 * We have seen an out of sequence RDMA read middle or last packet.
1379 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
1381 static void rdma_seq_err(struct hfi1_qp *qp, struct hfi1_ibport *ibp, u32 psn,
1382 struct hfi1_ctxtdata *rcd)
1384 struct hfi1_swqe *wqe;
1386 /* Remove QP from retry timer */
1387 if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
1388 qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
1389 del_timer(&qp->s_timer);
1392 wqe = get_swqe_ptr(qp, qp->s_acked);
1394 while (cmp_psn(psn, wqe->lpsn) > 0) {
1395 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1396 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1397 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1399 wqe = do_rc_completion(qp, wqe, ibp);
1403 qp->r_flags |= HFI1_R_RDMAR_SEQ;
1404 restart_rc(qp, qp->s_last_psn + 1, 0);
1405 if (list_empty(&qp->rspwait)) {
1406 qp->r_flags |= HFI1_R_RSP_SEND;
1407 atomic_inc(&qp->refcount);
1408 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1413 * rc_rcv_resp - process an incoming RC response packet
1414 * @ibp: the port this packet came in on
1415 * @ohdr: the other headers for this packet
1416 * @data: the packet data
1417 * @tlen: the packet length
1418 * @qp: the QP for this packet
1419 * @opcode: the opcode for this packet
1420 * @psn: the packet sequence number for this packet
1421 * @hdrsize: the header length
1422 * @pmtu: the path MTU
1424 * This is called from hfi1_rc_rcv() to process an incoming RC response
1425 * packet for the given QP.
1426 * Called at interrupt level.
1428 static void rc_rcv_resp(struct hfi1_ibport *ibp,
1429 struct hfi1_other_headers *ohdr,
1430 void *data, u32 tlen, struct hfi1_qp *qp,
1431 u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
1432 struct hfi1_ctxtdata *rcd)
1434 struct hfi1_swqe *wqe;
1435 enum ib_wc_status status;
1436 unsigned long flags;
1442 spin_lock_irqsave(&qp->s_lock, flags);
1444 /* Ignore invalid responses. */
1445 if (cmp_psn(psn, qp->s_next_psn) >= 0)
1448 /* Ignore duplicate responses. */
1449 diff = cmp_psn(psn, qp->s_last_psn);
1450 if (unlikely(diff <= 0)) {
1451 /* Update credits for "ghost" ACKs */
1452 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1453 aeth = be32_to_cpu(ohdr->u.aeth);
1454 if ((aeth >> 29) == 0)
1455 hfi1_get_credit(qp, aeth);
1461 * Skip everything other than the PSN we expect, if we are waiting
1462 * for a reply to a restarted RDMA read or atomic op.
1464 if (qp->r_flags & HFI1_R_RDMAR_SEQ) {
1465 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
1467 qp->r_flags &= ~HFI1_R_RDMAR_SEQ;
1470 if (unlikely(qp->s_acked == qp->s_tail))
1472 wqe = get_swqe_ptr(qp, qp->s_acked);
1473 status = IB_WC_SUCCESS;
1476 case OP(ACKNOWLEDGE):
1477 case OP(ATOMIC_ACKNOWLEDGE):
1478 case OP(RDMA_READ_RESPONSE_FIRST):
1479 aeth = be32_to_cpu(ohdr->u.aeth);
1480 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
1481 __be32 *p = ohdr->u.at.atomic_ack_eth;
1483 val = ((u64) be32_to_cpu(p[0]) << 32) |
1487 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1488 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1490 wqe = get_swqe_ptr(qp, qp->s_acked);
1491 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1494 * If this is a response to a resent RDMA read, we
1495 * have to be careful to copy the data to the right location.
1498 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1502 case OP(RDMA_READ_RESPONSE_MIDDLE):
1503 /* no AETH, no ACK */
1504 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
1506 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1509 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1511 if (unlikely(pmtu >= qp->s_rdma_read_len))
1515 * We got a response so update the timeout.
1516 * 4.096 usec. * (1 << qp->timeout)
1518 qp->s_flags |= HFI1_S_TIMER;
1519 mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
1520 if (qp->s_flags & HFI1_S_WAIT_ACK) {
1521 qp->s_flags &= ~HFI1_S_WAIT_ACK;
1522 hfi1_schedule_send(qp);
1525 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1526 qp->s_retry = qp->s_retry_cnt;
1529 * Update the RDMA receive state but do the copy w/o
1530 * holding the locks and blocking interrupts.
1532 qp->s_rdma_read_len -= pmtu;
1533 update_last_psn(qp, psn);
1534 spin_unlock_irqrestore(&qp->s_lock, flags);
1535 hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
1538 case OP(RDMA_READ_RESPONSE_ONLY):
1539 aeth = be32_to_cpu(ohdr->u.aeth);
1540 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1542 /* Get the number of bytes the message was padded by. */
1543 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1545 * Check that the data size is >= 0 && <= pmtu.
1546 * Remember to account for ICRC (4).
1548 if (unlikely(tlen < (hdrsize + pad + 4)))
1551 * If this is a response to a resent RDMA read, we
1552 * have to be careful to copy the data to the right location.
1555 wqe = get_swqe_ptr(qp, qp->s_acked);
1556 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1560 case OP(RDMA_READ_RESPONSE_LAST):
1561 /* ACKs READ req. */
1562 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
1564 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1566 /* Get the number of bytes the message was padded by. */
1567 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1569 * Check that the data size is >= 1 && <= pmtu.
1570 * Remember to account for ICRC (4).
1572 if (unlikely(tlen <= (hdrsize + pad + 4)))
1575 tlen -= hdrsize + pad + 4;
1576 if (unlikely(tlen != qp->s_rdma_read_len))
1578 aeth = be32_to_cpu(ohdr->u.aeth);
1579 hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
1580 WARN_ON(qp->s_rdma_read_sge.num_sge);
1581 (void) do_rc_ack(qp, aeth, psn,
1582 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1587 status = IB_WC_LOC_QP_OP_ERR;
1591 rdma_seq_err(qp, ibp, psn, rcd);
1595 status = IB_WC_LOC_LEN_ERR;
1597 if (qp->s_last == qp->s_acked) {
1598 hfi1_send_complete(qp, wqe, status);
1599 hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1602 spin_unlock_irqrestore(&qp->s_lock, flags);
1608 * rc_rcv_error - process an incoming duplicate or error RC packet
1609 * @ohdr: the other headers for this packet
1610 * @data: the packet data
1611 * @qp: the QP for this packet
1612 * @opcode: the opcode for this packet
1613 * @psn: the packet sequence number for this packet
1614 * @diff: the difference between the PSN and the expected PSN
1616 * This is called from hfi1_rc_rcv() to process an unexpected
1617 * incoming RC packet for the given QP.
1618 * Called at interrupt level.
1619 * Return 1 if no more processing is needed; otherwise return 0 to
1620 * schedule a response to be sent.
1622 static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
1623 struct hfi1_qp *qp, u32 opcode, u32 psn, int diff,
1624 struct hfi1_ctxtdata *rcd)
1626 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1627 struct hfi1_ack_entry *e;
1628 unsigned long flags;
1634 * Packet sequence error.
1635 * A NAK will ACK earlier sends and RDMA writes.
1636 * Don't queue the NAK if we already sent one.
1638 if (!qp->r_nak_state) {
1640 qp->r_nak_state = IB_NAK_PSN_ERROR;
1641 /* Use the expected PSN. */
1642 qp->r_ack_psn = qp->r_psn;
1644 * Wait to send the sequence NAK until all packets
1645 * in the receive queue have been processed.
1646 * Otherwise, we end up propagating congestion.
1648 if (list_empty(&qp->rspwait)) {
1649 qp->r_flags |= HFI1_R_RSP_NAK;
1650 atomic_inc(&qp->refcount);
1651 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1658 * Handle a duplicate request. Don't re-execute SEND, RDMA
1659 * write or atomic op. Don't NAK errors, just silently drop
1660 * the duplicate request. Note that r_sge, r_len, and
1661 * r_rcv_len may be in use so don't modify them.
1663 * We are supposed to ACK the earliest duplicate PSN but we
1664 * can coalesce an outstanding duplicate ACK. We have to
1665 * send the earliest so that RDMA reads can be restarted at
1666 * the requester's expected PSN.
1668 * First, find where this duplicate PSN falls within the
1669 * ACKs previously sent.
1670 * old_req is true if there is an older response that is scheduled
1671 * to be sent before sending this one.
1677 spin_lock_irqsave(&qp->s_lock, flags);
1679 for (i = qp->r_head_ack_queue; ; i = prev) {
1680 if (i == qp->s_tail_ack_queue)
1685 prev = HFI1_MAX_RDMA_ATOMIC;
1686 if (prev == qp->r_head_ack_queue) {
1690 e = &qp->s_ack_queue[prev];
1695 if (cmp_psn(psn, e->psn) >= 0) {
1696 if (prev == qp->s_tail_ack_queue &&
1697 cmp_psn(psn, e->lpsn) <= 0)
1703 case OP(RDMA_READ_REQUEST): {
1704 struct ib_reth *reth;
1709 * If we didn't find the RDMA read request in the ack queue,
1710 * we can ignore this request.
1712 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
1714 /* RETH comes after BTH */
1715 reth = &ohdr->u.rc.reth;
1717 * Address range must be a subset of the original
1718 * request and start on pmtu boundaries.
1719 * We reuse the old ack_queue slot since the requester
1720 * should not back up and request an earlier PSN for the same request.
1723 offset = delta_psn(psn, e->psn) * qp->pmtu;
1724 len = be32_to_cpu(reth->length);
1725 if (unlikely(offset + len != e->rdma_sge.sge_length))
1727 if (e->rdma_sge.mr) {
1728 hfi1_put_mr(e->rdma_sge.mr);
1729 e->rdma_sge.mr = NULL;
1732 u32 rkey = be32_to_cpu(reth->rkey);
1733 u64 vaddr = be64_to_cpu(reth->vaddr);
1736 ok = hfi1_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
1737 IB_ACCESS_REMOTE_READ);
1741 e->rdma_sge.vaddr = NULL;
1742 e->rdma_sge.length = 0;
1743 e->rdma_sge.sge_length = 0;
1748 qp->s_tail_ack_queue = prev;
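/*
 * A resent read restarts exactly at the requested PSN: offset is the
 * byte position within the original transfer (whole packets times pmtu),
 * and the request is only honored if offset + len still matches the
 * original sge_length recorded in the ack queue entry.
 */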
1752 case OP(COMPARE_SWAP):
1753 case OP(FETCH_ADD): {
1755 * If we didn't find the atomic request in the ack queue
1756 * or the send tasklet is already backed up to send an
1757 * earlier entry, we can ignore this request.
1759 if (!e || e->opcode != (u8) opcode || old_req)
1761 qp->s_tail_ack_queue = prev;
1767 * Ignore this operation if it doesn't request an ACK
1768 * or an earlier RDMA read or atomic is going to be resent.
1770 if (!(psn & IB_BTH_REQ_ACK) || old_req)
1773 * Resend the most recent ACK if this request is
1774 * after all the previous RDMA reads and atomics.
1776 if (i == qp->r_head_ack_queue) {
1777 spin_unlock_irqrestore(&qp->s_lock, flags);
1778 qp->r_nak_state = 0;
1779 qp->r_ack_psn = qp->r_psn - 1;
1784 * Resend the RDMA read or atomic op which
1785 * ACKs this duplicate request.
1787 qp->s_tail_ack_queue = i;
1790 qp->s_ack_state = OP(ACKNOWLEDGE);
1791 qp->s_flags |= HFI1_S_RESP_PENDING;
1792 qp->r_nak_state = 0;
1793 hfi1_schedule_send(qp);
1796 spin_unlock_irqrestore(&qp->s_lock, flags);
1804 void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err)
1806 unsigned long flags;
1809 spin_lock_irqsave(&qp->s_lock, flags);
1810 lastwqe = hfi1_error_qp(qp, err);
1811 spin_unlock_irqrestore(&qp->s_lock, flags);
1816 ev.device = qp->ibqp.device;
1817 ev.element.qp = &qp->ibqp;
1818 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1819 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1823 static inline void update_ack_queue(struct hfi1_qp *qp, unsigned n)
1828 if (next > HFI1_MAX_RDMA_ATOMIC)
1830 qp->s_tail_ack_queue = next;
1831 qp->s_ack_state = OP(ACKNOWLEDGE);
1834 static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
1835 u32 lqpn, u32 rqpn, u8 svc_type)
1837 struct opa_hfi1_cong_log_event_internal *cc_event;
1839 if (sl >= OPA_MAX_SLS)
1842 spin_lock(&ppd->cc_log_lock);
1844 ppd->threshold_cong_event_map[sl/8] |= 1 << (sl % 8);
1845 ppd->threshold_event_counter++;
1847 cc_event = &ppd->cc_events[ppd->cc_log_idx++];
1848 if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
1849 ppd->cc_log_idx = 0;
1850 cc_event->lqpn = lqpn & HFI1_QPN_MASK;
1851 cc_event->rqpn = rqpn & HFI1_QPN_MASK;
1853 cc_event->svc_type = svc_type;
1854 cc_event->rlid = rlid;
1855 /* keep timestamp in units of 1.024 usec */
1856 cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;
1858 spin_unlock(&ppd->cc_log_lock);
1861 void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
1862 u32 rqpn, u8 svc_type)
1864 struct cca_timer *cca_timer;
1865 u16 ccti, ccti_incr, ccti_timer, ccti_limit;
1866 u8 trigger_threshold;
1867 struct cc_state *cc_state;
1869 if (sl >= OPA_MAX_SLS)
1872 cca_timer = &ppd->cca_timer[sl];
1874 cc_state = get_cc_state(ppd);
1876 if (cc_state == NULL)
1880 * 1) increase CCTI (for this SL)
1881 * 2) select IPG (i.e., call set_link_ipg())
1884 ccti_limit = cc_state->cct.ccti_limit;
1885 ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
1886 ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
1888 cc_state->cong_setting.entries[sl].trigger_threshold;
1890 spin_lock(&ppd->cca_timer_lock);
1892 if (cca_timer->ccti < ccti_limit) {
1893 if (cca_timer->ccti + ccti_incr <= ccti_limit)
1894 cca_timer->ccti += ccti_incr;
1896 cca_timer->ccti = ccti_limit;
1900 spin_unlock(&ppd->cca_timer_lock);
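/*
 * Each BECN nudges the per-SL CCTI up by ccti_increase, clamped at
 * ccti_limit; set_link_ipg() then maps the CCTI to a larger inter-packet
 * gap. The hrtimer armed below fires after ccti_timer * 1.024 usec,
 * presumably so the CCTI can decay again once the congestion indication
 * stops (the timer callback is not shown here).
 */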
1902 ccti = cca_timer->ccti;
1904 if (!hrtimer_active(&cca_timer->hrtimer)) {
1905 /* ccti_timer is in units of 1.024 usec */
1906 unsigned long nsec = 1024 * ccti_timer;
1908 hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
1912 if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
1913 log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
1917 * hfi1_rc_rcv - process an incoming RC packet
1918 * @rcd: the context pointer
1919 * @hdr: the header of this packet
1920 * @rcv_flags: flags relevant to rcv processing
1921 * @data: the packet data
1922 * @tlen: the packet length
1923 * @qp: the QP for this packet
1925 * This is called from qp_rcv() to process an incoming RC packet
1927 * Called at interrupt level.
1929 void hfi1_rc_rcv(struct hfi1_packet *packet)
1931 struct hfi1_ctxtdata *rcd = packet->rcd;
1932 struct hfi1_ib_header *hdr = packet->hdr;
1933 u32 rcv_flags = packet->rcv_flags;
1934 void *data = packet->ebuf;
1935 u32 tlen = packet->tlen;
1936 struct hfi1_qp *qp = packet->qp;
1937 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1938 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1939 struct hfi1_other_headers *ohdr = packet->ohdr;
1941 u32 hdrsize = packet->hlen;
1945 u32 pmtu = qp->pmtu;
1947 struct ib_reth *reth;
1948 unsigned long flags;
1950 int ret, is_fecn = 0;
1952 bth0 = be32_to_cpu(ohdr->bth[0]);
1953 if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
1956 bth1 = be32_to_cpu(ohdr->bth[1]);
1957 if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
1958 if (bth1 & HFI1_BECN_SMASK) {
1959 u16 rlid = qp->remote_ah_attr.dlid;
1962 lqpn = qp->ibqp.qp_num;
1963 rqpn = qp->remote_qpn;
1966 qp->remote_ah_attr.sl,
1970 is_fecn = bth1 & HFI1_FECN_SMASK;
1973 psn = be32_to_cpu(ohdr->bth[2]);
1974 opcode = bth0 >> 24;
1977 * Process responses (ACKs) before anything else. Note that the
1978 * packet sequence number will be for something in the send work
1979 * queue rather than the expected receive packet sequence number.
1980 * In other words, this QP is the requester.
1982 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1983 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1984 rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1985 hdrsize, pmtu, rcd);
1991 /* Compute 24 bits worth of difference. */
1992 diff = delta_psn(psn, qp->r_psn);
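/*
 * delta_psn() yields a signed difference over the 24-bit circular PSN
 * space, so wrap-around compares correctly (e.g. 0x000002 - 0xfffffe is
 * +4). diff > 0 means the packet is ahead of the expected PSN (a
 * sequence error that draws a NAK), diff < 0 means a duplicate; both are
 * handled by rc_rcv_error().
 */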
1993 if (unlikely(diff)) {
1994 if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
1999 /* Check for opcode sequence errors. */
2000 switch (qp->r_state) {
2001 case OP(SEND_FIRST):
2002 case OP(SEND_MIDDLE):
2003 if (opcode == OP(SEND_MIDDLE) ||
2004 opcode == OP(SEND_LAST) ||
2005 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
2009 case OP(RDMA_WRITE_FIRST):
2010 case OP(RDMA_WRITE_MIDDLE):
2011 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
2012 opcode == OP(RDMA_WRITE_LAST) ||
2013 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
2018 if (opcode == OP(SEND_MIDDLE) ||
2019 opcode == OP(SEND_LAST) ||
2020 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
2021 opcode == OP(RDMA_WRITE_MIDDLE) ||
2022 opcode == OP(RDMA_WRITE_LAST) ||
2023 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
2026 * Note that it is up to the requester to not send a new
2027 * RDMA read or atomic operation before receiving an ACK
2028 * for the previous operation.
2033 if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST))
2036 /* OK, process the packet. */
2038 case OP(SEND_FIRST):
2039 ret = hfi1_get_rwqe(qp, 0);
2046 case OP(SEND_MIDDLE):
2047 case OP(RDMA_WRITE_MIDDLE):
2049 /* Check for invalid length: payload must be exactly one PMTU and within the posted rwqe length. */
2050 if (unlikely(tlen != (hdrsize + pmtu + 4)))
2052 qp->r_rcv_len += pmtu;
2053 if (unlikely(qp->r_rcv_len > qp->r_len))
2055 hfi1_copy_sge(&qp->r_sge, data, pmtu, 1);
2058 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
2060 ret = hfi1_get_rwqe(qp, 1);
2068 case OP(SEND_ONLY_WITH_IMMEDIATE):
2069 ret = hfi1_get_rwqe(qp, 0);
2075 if (opcode == OP(SEND_ONLY))
2076 goto no_immediate_data;
2077 /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
2078 case OP(SEND_LAST_WITH_IMMEDIATE):
2080 wc.ex.imm_data = ohdr->u.imm_data;
2081 wc.wc_flags = IB_WC_WITH_IMM;
2084 case OP(RDMA_WRITE_LAST):
2089 /* Get the number of bytes the message was padded by. */
2090 pad = (bth0 >> 20) & 3;
2091 /* Check for invalid length. */
2092 /* LAST len should be >= 1 */
2093 if (unlikely(tlen < (hdrsize + pad + 4)))
2095 /* Don't count the CRC. */
2096 tlen -= (hdrsize + pad + 4);
2097 wc.byte_len = tlen + qp->r_rcv_len;
2098 if (unlikely(wc.byte_len > qp->r_len))
2100 hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
2101 hfi1_put_ss(&qp->r_sge);
2103 if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
2105 wc.wr_id = qp->r_wr_id;
2106 wc.status = IB_WC_SUCCESS;
2107 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2108 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2109 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2111 wc.opcode = IB_WC_RECV;
2113 wc.src_qp = qp->remote_qpn;
2114 wc.slid = qp->remote_ah_attr.dlid;
2116 * It seems that IB mandates the presence of an SL in a
2117 * work completion only for the UD transport (see section
2118 * 11.4.2 of IBTA Vol. 1).
2120 * However, the way the SL is chosen below is consistent
2121 * with the way that IB/qib works and is trying to avoid
2122 * introducing incompatibilities.
2124 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
2126 wc.sl = qp->remote_ah_attr.sl;
2127 /* zero fields that are N/A */
2130 wc.dlid_path_bits = 0;
2132 /* Signal completion event if the solicited bit is set. */
2133 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
2134 (bth0 & IB_BTH_SOLICITED) != 0);
2137 case OP(RDMA_WRITE_FIRST):
2138 case OP(RDMA_WRITE_ONLY):
2139 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
2140 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2143 reth = &ohdr->u.rc.reth;
2144 qp->r_len = be32_to_cpu(reth->length);
2146 qp->r_sge.sg_list = NULL;
2147 if (qp->r_len != 0) {
2148 u32 rkey = be32_to_cpu(reth->rkey);
2149 u64 vaddr = be64_to_cpu(reth->vaddr);
2152 /* Check rkey & NAK */
2153 ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
2154 rkey, IB_ACCESS_REMOTE_WRITE);
2157 qp->r_sge.num_sge = 1;
2159 qp->r_sge.num_sge = 0;
2160 qp->r_sge.sge.mr = NULL;
2161 qp->r_sge.sge.vaddr = NULL;
2162 qp->r_sge.sge.length = 0;
2163 qp->r_sge.sge.sge_length = 0;
2165 if (opcode == OP(RDMA_WRITE_FIRST))
2167 else if (opcode == OP(RDMA_WRITE_ONLY))
2168 goto no_immediate_data;
2169 ret = hfi1_get_rwqe(qp, 1);
2174 wc.ex.imm_data = ohdr->u.rc.imm_data;
2175 wc.wc_flags = IB_WC_WITH_IMM;
2178 case OP(RDMA_READ_REQUEST): {
2179 struct hfi1_ack_entry *e;
2183 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2185 next = qp->r_head_ack_queue + 1;
2186 /* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
2187 if (next > HFI1_MAX_RDMA_ATOMIC)
2189 spin_lock_irqsave(&qp->s_lock, flags);
2190 if (unlikely(next == qp->s_tail_ack_queue)) {
2191 if (!qp->s_ack_queue[next].sent)
2192 goto nack_inv_unlck;
2193 update_ack_queue(qp, next);
2195 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2196 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2197 hfi1_put_mr(e->rdma_sge.mr);
2198 e->rdma_sge.mr = NULL;
2200 reth = &ohdr->u.rc.reth;
2201 len = be32_to_cpu(reth->length);
2203 u32 rkey = be32_to_cpu(reth->rkey);
2204 u64 vaddr = be64_to_cpu(reth->vaddr);
2207 /* Check rkey & NAK */
2208 ok = hfi1_rkey_ok(qp, &e->rdma_sge, len, vaddr,
2209 rkey, IB_ACCESS_REMOTE_READ);
2211 goto nack_acc_unlck;
2213 * Update the next expected PSN. We add 1 later
2214 * below, so only add the remainder here.
2217 qp->r_psn += (len - 1) / pmtu;
2219 e->rdma_sge.mr = NULL;
2220 e->rdma_sge.vaddr = NULL;
2221 e->rdma_sge.length = 0;
2222 e->rdma_sge.sge_length = 0;
2227 e->lpsn = qp->r_psn;
2229 * We need to increment the MSN here instead of when we
2230 * finish sending the result since a duplicate request would
2231 * increment it more than once.
2235 qp->r_state = opcode;
2236 qp->r_nak_state = 0;
2237 qp->r_head_ack_queue = next;
2239 /* Schedule the send tasklet. */
2240 qp->s_flags |= HFI1_S_RESP_PENDING;
2241 hfi1_schedule_send(qp);
2243 spin_unlock_irqrestore(&qp->s_lock, flags);
2249 case OP(COMPARE_SWAP):
2250 case OP(FETCH_ADD): {
2251 struct ib_atomic_eth *ateth;
2252 struct hfi1_ack_entry *e;
2259 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2261 next = qp->r_head_ack_queue + 1;
2262 if (next > HFI1_MAX_RDMA_ATOMIC)
2264 spin_lock_irqsave(&qp->s_lock, flags);
2265 if (unlikely(next == qp->s_tail_ack_queue)) {
2266 if (!qp->s_ack_queue[next].sent)
2267 goto nack_inv_unlck;
2268 update_ack_queue(qp, next);
2270 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2271 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2272 hfi1_put_mr(e->rdma_sge.mr);
2273 e->rdma_sge.mr = NULL;
2275 ateth = &ohdr->u.atomic_eth;
2276 vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
2277 be32_to_cpu(ateth->vaddr[1]);
2278 if (unlikely(vaddr & (sizeof(u64) - 1)))
2279 goto nack_inv_unlck;
2280 rkey = be32_to_cpu(ateth->rkey);
2281 /* Check rkey & NAK */
2282 if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2284 IB_ACCESS_REMOTE_ATOMIC)))
2285 goto nack_acc_unlck;
2286 /* Perform atomic OP and save result. */
2287 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
2288 sdata = be64_to_cpu(ateth->swap_data);
2289 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2290 (u64) atomic64_add_return(sdata, maddr) - sdata :
2291 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
2292 be64_to_cpu(ateth->compare_data),
2294 hfi1_put_mr(qp->r_sge.sge.mr);
2295 qp->r_sge.num_sge = 0;
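/*
 * The two atomics differ only in how the old value is recovered:
 * FETCH_ADD uses atomic64_add_return() and subtracts the operand back
 * out, while COMPARE_SWAP takes cmpxchg()'s return value. Either way the
 * pre-operation 64-bit value is parked in e->atomic_data for the
 * ATOMIC_ACKNOWLEDGE built later in make_rc_ack(). The vaddr check above
 * rejects targets that are not 8-byte aligned.
 */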
2302 qp->r_state = opcode;
2303 qp->r_nak_state = 0;
2304 qp->r_head_ack_queue = next;
2306 /* Schedule the send tasklet. */
2307 qp->s_flags |= HFI1_S_RESP_PENDING;
2308 hfi1_schedule_send(qp);
2310 spin_unlock_irqrestore(&qp->s_lock, flags);
2317 /* NAK unknown opcodes. */
2321 qp->r_state = opcode;
2322 qp->r_ack_psn = psn;
2323 qp->r_nak_state = 0;
2324 /* Send an ACK if requested or required. */
2325 if (psn & (1 << 31))
2330 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2331 qp->r_ack_psn = qp->r_psn;
2332 /* Queue RNR NAK for later */
2333 if (list_empty(&qp->rspwait)) {
2334 qp->r_flags |= HFI1_R_RSP_NAK;
2335 atomic_inc(&qp->refcount);
2336 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2341 hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2342 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2343 qp->r_ack_psn = qp->r_psn;
2344 /* Queue NAK for later */
2345 if (list_empty(&qp->rspwait)) {
2346 qp->r_flags |= HFI1_R_RSP_NAK;
2347 atomic_inc(&qp->refcount);
2348 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2353 spin_unlock_irqrestore(&qp->s_lock, flags);
2355 hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2356 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2357 qp->r_ack_psn = qp->r_psn;
2358 /* Queue NAK for later */
2359 if (list_empty(&qp->rspwait)) {
2360 qp->r_flags |= HFI1_R_RSP_NAK;
2361 atomic_inc(&qp->refcount);
2362 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2367 spin_unlock_irqrestore(&qp->s_lock, flags);
2369 hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
2370 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2371 qp->r_ack_psn = qp->r_psn;
2373 hfi1_send_rc_ack(rcd, qp, is_fecn);
2376 void hfi1_rc_hdrerr(
2377 struct hfi1_ctxtdata *rcd,
2378 struct hfi1_ib_header *hdr,
2382 int has_grh = rcv_flags & HFI1_HAS_GRH;
2383 struct hfi1_other_headers *ohdr;
2384 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
2392 ohdr = &hdr->u.l.oth;
2394 opcode = be32_to_cpu(ohdr->bth[0]);
2395 if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
2398 psn = be32_to_cpu(ohdr->bth[2]);
2401 /* Only deal with RDMA Writes for now */
2402 if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
2403 diff = delta_psn(psn, qp->r_psn);
2404 if (!qp->r_nak_state && diff >= 0) {
2406 qp->r_nak_state = IB_NAK_PSN_ERROR;
2407 /* Use the expected PSN. */
2408 qp->r_ack_psn = qp->r_psn;
2410 * Wait to send the sequence
2411 * NAK until all packets
2412 * in the receive queue have been processed.
2414 * Otherwise, we end up
2415 * propagating congestion.
2417 if (list_empty(&qp->rspwait)) {
2418 qp->r_flags |= HFI1_R_RSP_NAK;
2419 atomic_inc(&qp->refcount);
2422 &rcd->qp_wait_list);
2424 } /* Out of sequence NAK */
2425 } /* QP Request NAKs */