/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_recv_work *recv;
        u32 i;

        for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
                struct ib_sge *sge;

                recv->r_ibinc = NULL;
                recv->r_frag = NULL;

                recv->r_wr.next = NULL;
                recv->r_wr.wr_id = i;
                recv->r_wr.sg_list = recv->r_sge;
                recv->r_wr.num_sge = RDS_IB_RECV_SGE;

                sge = &recv->r_sge[0];
                sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
                sge->length = sizeof(struct rds_header);
                sge->lkey = ic->i_mr->lkey;

                sge = &recv->r_sge[1];
                sge->addr = 0;
                sge->length = RDS_FRAG_SIZE;
                sge->lkey = ic->i_mr->lkey;
        }
}

/*
 * The entire 'from' list, including the from element itself, is put on
 * to the tail of the 'to' list.
 */
static void list_splice_entire_tail(struct list_head *from,
                                    struct list_head *to)
{
        struct list_head *from_last = from->prev;

        list_splice_tail(from_last, to);
        list_add_tail(from_last, to);
}
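
/*
 * A worked example of the splice above, assuming a three-node ring
 * from <-> A <-> B (so from->prev is B): treating B as a temporary list
 * head lets list_splice_tail() move the remaining nodes, 'from' itself
 * and A, to the tail of 'to'; the final list_add_tail() then moves B too.
 */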

static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
        struct list_head *tmp;

        tmp = xchg(&cache->xfer, NULL);
        if (tmp) {
                if (cache->ready)
                        list_splice_entire_tail(tmp, cache->ready);
                else
                        cache->ready = tmp;
        }
}

static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
{
        struct rds_ib_cache_head *head;
        int cpu;

        cache->percpu = alloc_percpu(struct rds_ib_cache_head);
        if (!cache->percpu)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                head->first = NULL;
                head->count = 0;
        }
        cache->xfer = NULL;
        cache->ready = NULL;

        return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
{
        int ret;

        ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
        if (!ret) {
                ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
                if (ret)
                        free_percpu(ic->i_cache_incs.percpu);
        }

        return ret;
}

static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
                                          struct list_head *caller_list)
{
        struct rds_ib_cache_head *head;
        int cpu;

        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                if (head->first) {
                        list_splice_entire_tail(head->first, caller_list);
                        head->first = NULL;
                }
        }

        if (cache->ready) {
                list_splice_entire_tail(cache->ready, caller_list);
                cache->ready = NULL;
        }
}

void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
        struct rds_ib_incoming *inc;
        struct rds_ib_incoming *inc_tmp;
        struct rds_page_frag *frag;
        struct rds_page_frag *frag_tmp;
        LIST_HEAD(list);

        rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
        rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
        free_percpu(ic->i_cache_incs.percpu);

        list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
                list_del(&inc->ii_cache_entry);
                WARN_ON(!list_empty(&inc->ii_frags));
                kmem_cache_free(rds_ib_incoming_slab, inc);
        }

        rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
        rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
        free_percpu(ic->i_cache_frags.percpu);

        list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
                list_del(&frag->f_cache_entry);
                WARN_ON(!list_empty(&frag->f_item));
                kmem_cache_free(rds_ib_frag_slab, frag);
        }
}

static void rds_ib_recv_cache_put(struct list_head *new_item,
                                  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);

/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
                             struct rds_page_frag *frag)
{
        rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

        rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
}

/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct rds_page_frag *pos;
        struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

        /* Free attached frags */
        list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
                list_del_init(&frag->f_item);
                rds_ib_frag_free(ic, frag);
        }
        BUG_ON(!list_empty(&ibinc->ii_frags));

        rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
        rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}

static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
                                  struct rds_ib_recv_work *recv)
{
        if (recv->r_ibinc) {
                rds_inc_put(&recv->r_ibinc->ii_inc);
                recv->r_ibinc = NULL;
        }
        if (recv->r_frag) {
                ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
        }
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
        u32 i;

        for (i = 0; i < ic->i_recv_ring.w_nr; i++)
                rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}

static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic)
{
        struct rds_ib_incoming *ibinc;
        struct list_head *cache_item;
        int avail_allocs;

        cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
        if (cache_item) {
                ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
        } else {
                avail_allocs = atomic_add_unless(&rds_ib_allocation,
                                                 1, rds_ib_sysctl_max_recv_allocation);
                if (!avail_allocs) {
                        rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        return NULL;
                }
                ibinc = kmem_cache_alloc(rds_ib_incoming_slab, GFP_NOWAIT);
                if (!ibinc) {
                        atomic_dec(&rds_ib_allocation);
                        return NULL;
                }
        }
        INIT_LIST_HEAD(&ibinc->ii_frags);
        rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);

        return ibinc;
}

static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic)
{
        struct rds_page_frag *frag;
        struct list_head *cache_item;
        int ret;

        cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
        if (cache_item) {
                frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
        } else {
                frag = kmem_cache_alloc(rds_ib_frag_slab, GFP_NOWAIT);
                if (!frag)
                        return NULL;

                ret = rds_page_remainder_alloc(&frag->f_sg,
                                               RDS_FRAG_SIZE, GFP_NOWAIT);
                if (ret) {
                        kmem_cache_free(rds_ib_frag_slab, frag);
                        return NULL;
                }
        }

        INIT_LIST_HEAD(&frag->f_item);

        return frag;
}

static int rds_ib_recv_refill_one(struct rds_connection *conn,
                                  struct rds_ib_recv_work *recv)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_sge *sge;
        int ret = -ENOMEM;

        if (!ic->i_cache_incs.ready)
                rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
        if (!ic->i_cache_frags.ready)
                rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

        /*
         * ibinc was taken from recv if recv contained the start of a message.
         * recvs that were continuations will still have this allocated.
         */
        if (!recv->r_ibinc) {
                recv->r_ibinc = rds_ib_refill_one_inc(ic);
                if (!recv->r_ibinc)
                        goto out;
        }

        WARN_ON(recv->r_frag); /* leak! */
        recv->r_frag = rds_ib_refill_one_frag(ic);
        if (!recv->r_frag)
                goto out;

        ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
                            1, DMA_FROM_DEVICE);
        WARN_ON(ret != 1);

        sge = &recv->r_sge[0];
        sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
        sge->length = sizeof(struct rds_header);

        sge = &recv->r_sge[1];
        sge->addr = sg_dma_address(&recv->r_frag->f_sg);
        sge->length = sg_dma_len(&recv->r_frag->f_sg);

        ret = 0;
out:
        return ret;
}
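
/*
 * A sketch of the resulting work request for ring slot i, following the
 * sge layout set up in rds_ib_recv_init_ring() above:
 *
 *      r_sge[0]: ic->i_recv_hdrs_dma + i * sizeof(struct rds_header),
 *                sizeof(struct rds_header) bytes -- the RDS header
 *      r_sge[1]: sg_dma_address(&recv->r_frag->f_sg),
 *                RDS_FRAG_SIZE bytes -- the data fragment
 */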

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.
 *
 * -1 is returned if posting fails due to temporary resource exhaustion.
 */
int rds_ib_recv_refill(struct rds_connection *conn, int prefill)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_recv_work *recv;
        struct ib_recv_wr *failed_wr;
        unsigned int posted = 0;
        int ret = 0;
        u32 pos;

        while ((prefill || rds_conn_up(conn)) &&
               rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
                if (pos >= ic->i_recv_ring.w_nr) {
                        printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
                               pos);
                        ret = -EINVAL;
                        break;
                }

                recv = &ic->i_recvs[pos];
                ret = rds_ib_recv_refill_one(conn, recv);
                if (ret) {
                        ret = -1;
                        break;
                }

                /* XXX when can this fail? */
                ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
                rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
                         recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
                         (long) sg_dma_address(&recv->r_frag->f_sg), ret);
                if (ret) {
                        rds_ib_conn_error(conn, "recv post on "
                               "%pI4 returned %d, disconnecting and "
                               "reconnecting\n", &conn->c_faddr,
                               ret);
                        break;
                }

                posted++;
        }

        /* We're doing flow control - update the window. */
        if (ic->i_flowctl && posted)
                rds_ib_advertise_credits(conn, posted);

        if (ret)
                rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
        return ret;
}

/*
 * We want to recycle several types of recv allocations, like incs and frags.
 * To use this, the *_free() function passes in the ptr to a list_head within
 * the recyclee, as well as the cache to put it on.
 *
 * First, we put the memory on a percpu list. When this reaches a certain size,
 * we move it to an intermediate non-percpu list in a lockless manner, with some
 * xchg/cmpxchg wizardry.
 *
 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 * list_empty() will return true even though one element is actually present.
 * (A lifecycle sketch follows rds_ib_recv_cache_get() below.)
 */
static void rds_ib_recv_cache_put(struct list_head *new_item,
                                  struct rds_ib_refill_cache *cache)
{
        unsigned long flags;
        struct rds_ib_cache_head *chp;
        struct list_head *old;

        local_irq_save(flags);

        chp = per_cpu_ptr(cache->percpu, smp_processor_id());
        if (!chp->first)
                INIT_LIST_HEAD(new_item);
        else /* put on front */
                list_add_tail(new_item, chp->first);
        chp->first = new_item;
        chp->count++;

        if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
                goto end;

        /*
         * Return our per-cpu first list to the cache's xfer by atomically
         * grabbing the current xfer list, appending it to our per-cpu list,
         * and then atomically returning that entire list back to the
         * cache's xfer list as long as it's still empty.
         */
        do {
                old = xchg(&cache->xfer, NULL);
                if (old)
                        list_splice_entire_tail(old, chp->first);
                old = cmpxchg(&cache->xfer, NULL, chp->first);
        } while (old);

        chp->first = NULL;
        chp->count = 0;
end:
        local_irq_restore(flags);
}

static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
        struct list_head *head = cache->ready;

        if (head) {
                if (!list_empty(head)) {
                        cache->ready = head->next;
                        list_del_init(head);
                } else
                        cache->ready = NULL;
        }

        return head;
}
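
/*
 * A minimal sketch of one frag moving through the recycle cache, under
 * the put/get pair above:
 *
 *      rds_ib_frag_free(ic, frag)
 *        -> rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags)
 *           frag sits on this cpu's percpu list; once the list holds
 *           RDS_IB_RECYCLE_BATCH_COUNT entries, the whole batch is
 *           cmpxchg'd onto cache->xfer.
 *
 *      rds_ib_recv_refill_one(conn, recv)
 *        -> rds_ib_cache_xfer_to_ready(&ic->i_cache_frags)
 *           moves cache->xfer to cache->ready when ready has run dry.
 *        -> rds_ib_recv_cache_get(&ic->i_cache_frags)
 *           pops one entry off cache->ready for reuse.
 */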

int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
                            size_t size)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct iovec *iov = first_iov;
        unsigned long to_copy;
        unsigned long frag_off = 0;
        unsigned long iov_off = 0;
        int copied = 0;
        int ret;
        u32 len;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        len = be32_to_cpu(inc->i_hdr.h_len);

        while (copied < size && copied < len) {
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
                while (iov_off == iov->iov_len) {
                        iov_off = 0;
                        iov++;
                }

                to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
                to_copy = min_t(size_t, to_copy, size - copied);
                to_copy = min_t(unsigned long, to_copy, len - copied);

                rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
                         "[%p, %u] + %lu\n",
                         to_copy, iov->iov_base, iov->iov_len, iov_off,
                         sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);

                /* XXX needs + offset for multiple recvs per page */
                ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
                                            frag->f_sg.offset + frag_off,
                                            iov->iov_base + iov_off,
                                            to_copy);
                if (ret) {
                        copied = ret;
                        break;
                }

                iov_off += to_copy;
                frag_off += to_copy;
                copied += to_copy;
        }

        return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
        struct ib_send_wr *wr = &ic->i_ack_wr;
        struct ib_sge *sge = &ic->i_ack_sge;

        sge->addr = ic->i_ack_dma;
        sge->length = sizeof(struct rds_header);
        sge->lkey = ic->i_mr->lkey;

        wr->sg_list = sge;
        wr->num_sge = 1;
        wr->opcode = IB_WR_SEND;
        wr->wr_id = RDS_IB_ACK_WR_ID;
        wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
                                int ack_required)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        ic->i_ack_next = seq;
        if (ack_required)
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        unsigned long flags;
        u64 seq;

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        seq = ic->i_ack_next;
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);

        return seq;
}
#else
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
                                int ack_required)
{
        atomic64_set(&ic->i_ack_next, seq);
        if (ack_required) {
                smp_mb__before_clear_bit();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        }
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        smp_mb__after_clear_bit();

        return atomic64_read(&ic->i_ack_next);
}
#endif

static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
        struct rds_header *hdr = ic->i_ack;
        struct ib_send_wr *failed_wr;
        u64 seq;
        int ret;

        seq = rds_ib_get_ack(ic);

        rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
        rds_message_populate_header(hdr, 0, 0, 0);
        hdr->h_ack = cpu_to_be64(seq);
        hdr->h_credit = adv_credits;
        rds_message_make_checksum(hdr);
        ic->i_ack_queued = jiffies;

        ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
        if (unlikely(ret)) {
                /* Failed to send. Release the WR, and
                 * force another ACK.
                 */
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

                rds_ib_stats_inc(s_ib_ack_send_failure);

                rds_ib_conn_error(ic->conn, "sending ack failed\n");
        } else
                rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_ib_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  -  i_ack_flags, which keeps track of whether the ACK WR
 *     is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  -  i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */
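
/*
 * A rough trace of the two flag bits through one ACK cycle, following the
 * functions below (R = IB_ACK_REQUESTED, F = IB_ACK_IN_FLIGHT):
 *
 *      recv completion:       rds_ib_set_ack() sets R
 *      rds_ib_attempt_ack():  sets F, then clears R and posts the ACK WR
 *      send completion:       rds_ib_ack_send_complete() clears F and
 *                             retries rds_ib_attempt_ack() in case R was
 *                             set again in the meantime
 */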

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
        unsigned int adv_credits;

        if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                return;

        if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
                rds_ib_stats_inc(s_ib_ack_send_delayed);
                return;
        }

        /* Can we get a send credit? */
        if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
                rds_ib_stats_inc(s_ib_tx_throttle);
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                return;
        }

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
        rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
        if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                rds_ib_stats_inc(s_ib_ack_send_piggybacked);
        return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
                             struct rds_ib_incoming *ibinc)
{
        struct rds_cong_map *map;
        unsigned int map_off;
        unsigned int map_page;
        struct rds_page_frag *frag;
        unsigned long frag_off;
        unsigned long to_copy;
        unsigned long copied;
        uint64_t uncongested = 0;
        void *addr;

        /* catch completely corrupt packets */
        if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
                return;

        map = conn->c_fcong;
        map_page = 0;
        map_off = 0;

        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        frag_off = 0;

        copied = 0;

        while (copied < RDS_CONG_MAP_BYTES) {
                uint64_t *src, *dst;
                unsigned int k;

                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

                addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0);

                src = addr + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
                for (k = 0; k < to_copy; k += 8) {
                        /* Record ports that became uncongested, i.e. bits
                         * set in the stored map (dst) but cleared in the
                         * incoming update (src). */
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
                kunmap_atomic(addr, KM_SOFTIRQ0);

                copied += to_copy;

                map_off += to_copy;
                if (map_off == PAGE_SIZE) {
                        map_off = 0;
                        map_page++;
                }

                frag_off += to_copy;
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
        }

        /* the congestion map is in little endian order */
        uncongested = le64_to_cpu(uncongested);

        rds_cong_map_updated(map, uncongested);
}
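
/*
 * A worked example of the bit arithmetic above, for one 64bit word: if the
 * stored map word (*dst) is 0b1010 and the incoming update (*src) is 0b0010,
 * then ~(*src) & *dst = 0b1000 -- only the port whose bit dropped from 1 to
 * 0 is flagged, and rds_cong_map_updated() is told about it.
 */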

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
        u64             ack_next;
        u64             ack_recv;
        unsigned int    ack_required:1;
        unsigned int    ack_next_valid:1;
        unsigned int    ack_recv_valid:1;
};

static void rds_ib_process_recv(struct rds_connection *conn,
                                struct rds_ib_recv_work *recv, u32 data_len,
                                struct rds_ib_ack_state *state)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_incoming *ibinc = ic->i_ibinc;
        struct rds_header *ihdr, *hdr;

        /* XXX shut down the connection if port 0,0 are seen? */

        rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
                 data_len);

        if (data_len < sizeof(struct rds_header)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI4 didn't include a "
                       "header, disconnecting and "
                       "reconnecting\n",
                       &conn->c_faddr);
                return;
        }
        data_len -= sizeof(struct rds_header);

        ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

        /* Validate the checksum. */
        if (!rds_message_verify_checksum(ihdr)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI4 has corrupted header - "
                       "forcing a reconnect\n",
                       &conn->c_faddr);
                rds_stats_inc(s_recv_drop_bad_checksum);
                return;
        }

        /* Process the ACK sequence which comes with every packet */
        state->ack_recv = be64_to_cpu(ihdr->h_ack);
        state->ack_recv_valid = 1;

        /* Process the credits update if there was one */
        if (ihdr->h_credit)
                rds_ib_send_add_credits(conn, ihdr->h_credit);

        if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
                /* This is an ACK-only packet. The fact that it gets
                 * special treatment here is that historically, ACKs
                 * were rather special beasts.
                 */
                rds_ib_stats_inc(s_ib_ack_received);

                /*
                 * Usually the frags make their way on to incs and are then freed as
                 * the inc is freed.  We don't go that route, so we have to drop the
                 * page ref ourselves.  We can't just leave the page on the recv
                 * because that confuses the dma mapping of pages and each recv's use
                 * of a partial page.
                 *
                 * FIXME: Fold this into the code path below.
                 */
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
                return;
        }

        /*
         * If we don't already have an inc on the connection then this
         * fragment has a header and starts a message.. copy its header
         * into the inc and save the inc so we can hang upcoming fragments
         * off its list.
         */
        if (!ibinc) {
                ibinc = recv->r_ibinc;
                recv->r_ibinc = NULL;
                ic->i_ibinc = ibinc;

                hdr = &ibinc->ii_inc.i_hdr;
                memcpy(hdr, ihdr, sizeof(*hdr));
                ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

                rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
                         ic->i_recv_data_rem, hdr->h_flags);
        } else {
                hdr = &ibinc->ii_inc.i_hdr;
                /* We can't just use memcmp here; fragments of a
                 * single message may carry different ACKs */
                if (hdr->h_sequence != ihdr->h_sequence ||
                    hdr->h_len != ihdr->h_len ||
                    hdr->h_sport != ihdr->h_sport ||
                    hdr->h_dport != ihdr->h_dport) {
                        rds_ib_conn_error(conn,
                                "fragment header mismatch; forcing reconnect\n");
                        return;
                }
        }

        list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
        recv->r_frag = NULL;

        if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
                ic->i_recv_data_rem -= RDS_FRAG_SIZE;
        else {
                ic->i_recv_data_rem = 0;
                ic->i_ibinc = NULL;

                if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
                        rds_ib_cong_recv(conn, ibinc);
                else {
                        rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
                                          &ibinc->ii_inc, GFP_ATOMIC,
                                          KM_SOFTIRQ0);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }

                /* Evaluate the ACK_REQUIRED flag *after* we received
                 * the complete frame, and after bumping the next_rx
                 * sequence. */
                if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
                        rds_stats_inc(s_recv_ack_required);
                        state->ack_required = 1;
                }

                rds_inc_put(&ibinc->ii_inc);
        }
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
        struct rds_connection *conn = context;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p cq %p\n", conn, cq);

        rds_ib_stats_inc(s_ib_rx_cq_call);

        tasklet_schedule(&ic->i_recv_tasklet);
}

static inline void rds_poll_cq(struct rds_ib_connection *ic,
                               struct rds_ib_ack_state *state)
{
        struct rds_connection *conn = ic->conn;
        struct ib_wc wc;
        struct rds_ib_recv_work *recv;

        while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
                rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
                         (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
                         be32_to_cpu(wc.ex.imm_data));
                rds_ib_stats_inc(s_ib_rx_cq_event);

                recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

                ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);

                /*
                 * Also process recvs in connecting state because it is possible
                 * to get a recv completion _before_ the rdmacm ESTABLISHED
                 * event is processed.
                 */
                if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
                        /* We expect errors as the qp is drained during shutdown */
                        if (wc.status == IB_WC_SUCCESS) {
                                rds_ib_process_recv(conn, recv, wc.byte_len, state);
                        } else {
                                rds_ib_conn_error(conn, "recv completion on "
                                       "%pI4 had status %u, disconnecting and "
                                       "reconnecting\n", &conn->c_faddr,
                                       wc.status);
                        }
                }

                rds_ib_ring_free(&ic->i_recv_ring, 1);
        }
}

void rds_ib_recv_tasklet_fn(unsigned long data)
{
        struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
        struct rds_connection *conn = ic->conn;
        struct rds_ib_ack_state state = { 0, };

        rds_poll_cq(ic, &state);
        ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        rds_poll_cq(ic, &state);

        if (state.ack_next_valid)
                rds_ib_set_ack(ic, state.ack_next, state.ack_required);
        if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
                rds_send_drop_acked(conn, state.ack_recv, NULL);
                ic->i_ack_recv = state.ack_recv;
        }
        if (rds_conn_up(conn))
                rds_ib_attempt_ack(ic);

        /* If we ever end up with a really empty receive ring, we're
         * in deep trouble, as the sender will definitely see RNR
         * timeouts. */
        if (rds_ib_ring_empty(&ic->i_recv_ring))
                rds_ib_stats_inc(s_ib_rx_ring_empty);

        if (rds_ib_ring_low(&ic->i_recv_ring))
                rds_ib_recv_refill(conn, 0);
}

int rds_ib_recv(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        int ret = 0;

        rdsdebug("conn %p\n", conn);
        if (rds_conn_up(conn))
                rds_ib_attempt_ack(ic);

        return ret;
}

int __init rds_ib_recv_init(void)
{
        struct sysinfo si;
        int ret = -ENOMEM;

        /* Default to roughly one third of all available RAM for recv memory */
        si_meminfo(&si);
        rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

        rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
                                        sizeof(struct rds_ib_incoming),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!rds_ib_incoming_slab)
                goto out;

        rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
                                        sizeof(struct rds_page_frag),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!rds_ib_frag_slab)
                kmem_cache_destroy(rds_ib_incoming_slab);
        else
                ret = 0;
out:
        return ret;
}
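
/*
 * A worked example of the cap above, assuming 4 KiB pages and the usual
 * 4 KiB RDS_FRAG_SIZE: with 4 GiB of RAM, si.totalram is 1048576 pages,
 * so the limit is 1048576 / 3 * 4096 / 4096, i.e. roughly 349525
 * fragments, about a third of memory.
 */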

void rds_ib_recv_exit(void)
{
        kmem_cache_destroy(rds_ib_incoming_slab);
        kmem_cache_destroy(rds_ib_frag_slab);
}