/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the soft-lockup
 * watchdog will fire.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
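/*
 * Usage illustration (assumption, not from the original source): with 0444
 * permissions the parameter is load-time only and read-only afterwards, so
 * a different batch size has to be given when the module is loaded, e.g.:
 *
 *	# modprobe rds send_batch_count=64
 *	# cat /sys/module/rds/parameters/send_batch_count
 *
 * The module name and sysfs path above follow the usual module_param()
 * conventions and are assumptions, not taken from this file.
 */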
static void rds_send_remove_from_sock(struct list_head *messages, int status);
/*
 * Reset the send state. Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;

	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);

	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare. We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}
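/*
 * Hedged sketch (assumption): the waiter side pairs with the wake above by
 * sleeping on c_waitq until RDS_IN_XMIT is clear, roughly
 *
 *	wait_event(conn->c_waitq,
 *		   !test_bit(RDS_IN_XMIT, &conn->c_flags));
 *
 * which is why release_in_xmit() only needs waitqueue_active() plus
 * wake_up_all() here instead of the wait_on_bit()/wake_up_bit() helpers.
 */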
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *	- tx queueing is a simple fifo list
 *	- reassembly is optional and easily done by transports per conn
 *	- no per flow rx lookup at all, straight to the socket
 *	- less per-frag memory and wire overhead
 *	- queued acks can be delayed behind large messages
 *	- small message latency is higher behind queued large messages
 *	- large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	struct scatterlist *sg;
	LIST_HEAD(to_be_dropped);
	unsigned long send_gen = 0;
	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue. We only have one task feeding the connection at a time. If
	 * another thread is already feeding the queue then we back off. This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);

	/*
	 * we record the send generation after doing the xmit acquire.
	 * if someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	send_gen = conn->c_send_gen;
	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);
	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection. We can use this ref while holding the
		 * send_sem. rds_send_reset() is serialized with it.
		 */
			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups. If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * queue in case we get an ACK.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		/* Unfortunately, the way Infiniband deals with
		 * RDMA to a bad MR key is by moving the entire
		 * queue pair to error state. We could possibly
		 * recover from that, but right now we drop the
		 * connection.
		 * Therefore, we never retransmit messages with RDMA ops.
		 */
		if (rm->rdma.op_active &&
		    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
			spin_lock_irqsave(&conn->c_lock, flags);
			if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
				list_move(&rm->m_conn_item, &to_be_dropped);
			spin_unlock_irqrestore(&conn->c_lock, flags);
		/* Require an ACK every once in a while */
		len = ntohl(rm->m_inc.i_hdr.h_len);
		if (conn->c_unacked_packets == 0 ||
		    conn->c_unacked_bytes < len) {
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

			conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
			conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
			rds_stats_inc(s_send_ack_required);
		} else {
			conn->c_unacked_bytes -= len;
			conn->c_unacked_packets--;
		}
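		/*
		 * Worked example (illustration only; the real budgets come
		 * from the rds_sysctl_* values above): if the defaults were
		 * 8 packets and 16 KiB, a stream of 1 KiB messages would set
		 * RDS_MSG_ACK_REQUIRED on roughly every ninth message (the
		 * packet budget reaches zero first), while 64 KiB messages
		 * would request an ACK on every message because the byte
		 * budget is always smaller than len.
		 */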
		conn->c_xmit_rm = rm;

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);

			conn->c_xmit_rdma_sent = 1;
		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);

			conn->c_xmit_atomic_sent = 1;
		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}
		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;

			sg = &rm->data.op_sg[conn->c_xmit_sg];
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		/*
		 * An rm will only take multiple passes through this loop
		 * if there is a data op. Thus, if the data is sent (or there was
		 * none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);
	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}
	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT. In that case they'd back off and
	 * not try and send their newly queued message. We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto.
	 */
		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&conn->c_send_queue)) &&
		    send_gen == conn->c_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			if (batch_count < send_batch_count)
				goto restart;
			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
		}
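	/*
	 * Hedged sketch (assumption, not part of this file): a transport
	 * whose xmit path could not continue is expected to re-drive the
	 * send queue from its tx completion handler once room frees up,
	 * e.g. by requeueing the connection's send work so that
	 * rds_send_worker() ends up back in rds_send_xmit():
	 *
	 *	static void example_tx_complete(struct rds_connection *conn)
	 *	{
	 *		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
	 *	}
	 *
	 * example_tx_complete() is a made-up name; real transports hook this
	 * into their own completion processing.
	 */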
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
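/*
 * Sketch of a transport-supplied is_acked_func (assumption, not from this
 * file): a stream transport that assigns ack sequence numbers late might
 * compare against its own m_ack_seq instead of h_sequence, e.g.
 *
 *	static int example_is_acked(struct rds_message *rm, u64 ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return rm->m_ack_seq <= ack;
 *	}
 *
 * The RDS_MSG_HAS_ACK_SEQ/m_ack_seq pairing mirrors the TCP behaviour
 * described above rds_send_drop_acked() below; example_is_acked() itself
 * is hypothetical.
 */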
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);

	spin_unlock_irqrestore(&conn->c_lock, flags);

EXPORT_SYMBOL_GPL(rds_send_get_message);
/*
 * This removes messages from the socket's list if they're on it. The list
 * argument must be private to the caller, we must be able to modify it
 * without locks. The messages must have a reference held for their
 * position on the list. This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock. If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
				sock_hold(rds_rs_to_sk(rs));

			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}

		spin_unlock(&rs->rs_lock);

		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	rds_wake_sk_sleep(rs);
	sock_put(rds_rs_to_sk(rs));
/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number. Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here.
		 * Taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
/*
 * we only want this to fire once so we use the caller's 'queued'. It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

	spin_unlock_irqrestore(&rs->rs_lock, flags);
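	/*
	 * Worked example of the *old*-value check above (numbers are made
	 * up): with a 64 KiB sndbuf and rs_snd_bytes at 60 KiB, a 16 KiB
	 * message is still admitted (60 KiB < 64 KiB) and pushes
	 * rs_snd_bytes to 76 KiB; the next nonblocking sendmsg() sees
	 * 76 KiB >= 64 KiB, is refused with -EAGAIN, and poll() stops
	 * reporting send room until ACKs drain rs_snd_bytes back down.
	 */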
/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))

		if (cmsg->cmsg_level != SOL_RDS)

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:

			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:

			/* these are valid but do not add any size */

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:

			size += sizeof(struct scatterlist);

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
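	/*
	 * Illustration (assuming 4 KiB pages): a 10000-byte payload needs
	 * ceil(10000, 4096) = 3 data scatterlist entries, so the data part
	 * contributes 3 * sizeof(struct scatterlist) here, on top of
	 * whatever the RDMA/atomic cmsgs added above.
	 */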
	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))

		if (cmsg->cmsg_level != SOL_RDS)

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);
	/* Mirror Linux UDP's mirroring of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */

	if (payload_len > rds_sk_sndbuf(rs)) {

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);

	rm = rds_message_alloc(ret, GFP_KERNEL);
	/* Attach data to the rm */
	rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
	if (!rm->data.op_sg) {

	ret = rds_message_copy_from_user(rm, &msg->msg_iter);

	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)

		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						rs->rs_bound_addr, daddr,
						sock->sk->sk_allocation);
		ret = PTR_ERR(conn);
	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
		rs->rs_seen_congestion = 1;

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
		rds_stats_inc(s_send_queue_full);

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,

		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)

	/*
	 * By now we've committed to the send. We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);

	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	rds_message_put(rm);
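/*
 * Userspace view (hedged sketch, not from this file): a minimal caller of
 * this path creates a PF_RDS socket, binds a local address, and then uses
 * plain sendmsg()/sendto() with a sockaddr_in destination:
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = inet_addr("192.0.2.2"),
 *		.sin_port = htons(4000),
 *	};
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * RDMA and atomic operations ride along as SOL_RDS control messages
 * (RDS_CMSG_RDMA_ARGS and friends), which is what rds_cmsg_send() parsed
 * above. The address and port are made up for illustration, and the bind()
 * of the local endpoint is elided.
 */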
/*
 * Reply to a ping packet.
 */
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;

	rm = rds_message_alloc(0, GFP_ATOMIC);

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	rds_message_put(rm);

	rds_message_put(rm);