/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer not to let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 restores the old behavior (looping until the queue is drained).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");

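/*
 * Example (illustrative, not part of the upstream file): with 0444
 * permissions the parameter is read-only at runtime, so it can only be
 * set at module load time and then inspected through sysfs.  Assuming
 * the usual module name "rds", something like:
 *
 *	# modprobe rds send_batch_count=256
 *	# cat /sys/module/rds/parameters/send_batch_count
 *	256
 */
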
static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;

        if (conn->c_xmit_rm) {
                rm = conn->c_xmit_rm;
                conn->c_xmit_rm = NULL;
                /* Tell the user the RDMA op is no longer mapped by the
                 * transport. This isn't entirely true (it's flushed out
                 * independently) but as the connection is down, there's
                 * no ongoing RDMA to/from that memory */
                rds_message_unmapped(rm);
                rds_message_put(rm);
        }

        conn->c_xmit_sg = 0;
        conn->c_xmit_hdr_off = 0;
        conn->c_xmit_data_off = 0;
        conn->c_xmit_atomic_sent = 0;
        conn->c_xmit_rdma_sent = 0;
        conn->c_xmit_data_sent = 0;

        conn->c_map_queued = 0;

        conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
        conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

        /* Mark messages as retransmissions, and move them to the send q */
        spin_lock_irqsave(&conn->c_lock, flags);
        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
                set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
        }
        list_splice_init(&conn->c_retrans, &conn->c_send_queue);
        spin_unlock_irqrestore(&conn->c_lock, flags);
}

static int acquire_in_xmit(struct rds_connection *conn)
{
        return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
        clear_bit(RDS_IN_XMIT, &conn->c_flags);
        smp_mb__after_atomic();
        /*
         * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
         * hot path and finding waiters is very rare.  We don't want to walk
         * the system-wide hashed waitqueue buckets in the fast path only to
         * almost never find waiters.
         */
        if (waitqueue_active(&conn->c_waitq))
                wake_up_all(&conn->c_waitq);
}
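
/*
 * Waiter-side sketch (illustrative; the real waiter lives in the
 * connection shutdown path, not in this file): RDS_IN_XMIT acts as a
 * hand-rolled bit lock, so a thread that must wait for the transmit
 * path to drain sleeps on c_waitq and re-checks the bit, e.g.:
 *
 *	wait_event(conn->c_waitq,
 *		   !test_bit(RDS_IN_XMIT, &conn->c_flags));
 *
 * which is why release_in_xmit() issues the memory barrier before
 * testing waitqueue_active() above.
 */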

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
        struct rds_message *rm;
        unsigned long flags;
        unsigned int tmp;
        struct scatterlist *sg;
        int ret = 0;
        LIST_HEAD(to_be_dropped);
        int batch_count;
        unsigned long send_gen = 0;

restart:
        batch_count = 0;

        /*
         * sendmsg calls here after having queued its message on the send
         * queue.  We only have one task feeding the connection at a time.  If
         * another thread is already feeding the queue then we back off.  This
         * avoids blocking the caller and trading per-connection data between
         * caches per message.
         */
        if (!acquire_in_xmit(conn)) {
                rds_stats_inc(s_send_lock_contention);
                ret = -ENOMEM;
                goto out;
        }

        /*
         * we record the send generation after doing the xmit acquire.
         * if someone else manages to jump in and do some work, we'll use
         * this to avoid a goto restart farther down.
         *
         * The acquire_in_xmit() check above ensures that only one
         * caller can increment c_send_gen at any time.
         */
        conn->c_send_gen++;
        send_gen = conn->c_send_gen;

        /*
         * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
         * we do the opposite to avoid races.
         */
        if (!rds_conn_up(conn)) {
                release_in_xmit(conn);
                ret = 0;
                goto out;
        }

        if (conn->c_trans->xmit_prepare)
                conn->c_trans->xmit_prepare(conn);

        /*
         * spin trying to push headers and data down the connection until
         * the connection doesn't make forward progress.
         */
        while (1) {

                rm = conn->c_xmit_rm;

                /*
                 * If between sending messages, we can send a pending congestion
                 * map update.
                 */
                if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
                        rm = rds_cong_update_alloc(conn);
                        if (IS_ERR(rm)) {
                                ret = PTR_ERR(rm);
                                break;
                        }
                        rm->data.op_active = 1;

                        conn->c_xmit_rm = rm;
                }

                /*
                 * If not already working on one, grab the next message.
                 *
                 * c_xmit_rm holds a ref while we're sending this message down
                 * the connection.  We can use this ref while we hold
                 * RDS_IN_XMIT; rds_send_reset() is serialized with it.
                 */
                if (!rm) {
                        unsigned int len;

                        batch_count++;

                        /* we want to process as big a batch as we can, but
                         * we also want to avoid softlockups.  If we've been
                         * through a lot of messages, let's back off and see
                         * if anyone else jumps in
                         */
                        if (batch_count >= send_batch_count)
                                goto over_batch;

                        spin_lock_irqsave(&conn->c_lock, flags);

                        if (!list_empty(&conn->c_send_queue)) {
                                rm = list_entry(conn->c_send_queue.next,
                                                struct rds_message,
                                                m_conn_item);
                                rds_message_addref(rm);

                                /*
                                 * Move the message from the send queue to the retransmit
                                 * list right away.
                                 */
                                list_move_tail(&rm->m_conn_item, &conn->c_retrans);
                        }

                        spin_unlock_irqrestore(&conn->c_lock, flags);

                        if (!rm)
                                break;

                        /* Unfortunately, the way Infiniband deals with
                         * RDMA to a bad MR key is by moving the entire
                         * queue pair to error state. We could possibly
                         * recover from that, but right now we drop the
                         * connection.
                         * Therefore, we never retransmit messages with RDMA ops.
                         */
                        if (rm->rdma.op_active &&
                            test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
                                spin_lock_irqsave(&conn->c_lock, flags);
                                if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
                                        list_move(&rm->m_conn_item, &to_be_dropped);
                                spin_unlock_irqrestore(&conn->c_lock, flags);
                                continue;
                        }

                        /* Require an ACK every once in a while */
                        len = ntohl(rm->m_inc.i_hdr.h_len);
                        if (conn->c_unacked_packets == 0 ||
                            conn->c_unacked_bytes < len) {
                                __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                                conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
                                conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
                                rds_stats_inc(s_send_ack_required);
                        } else {
                                conn->c_unacked_bytes -= len;
                                conn->c_unacked_packets--;
                        }

                        conn->c_xmit_rm = rm;
                }

                /* The transport either sends the whole rdma or none of it */
                if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
                        rm->m_final_op = &rm->rdma;
                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue
                         */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                        ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
                        if (ret) {
                                clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
                                wake_up_interruptible(&rm->m_flush_wait);
                                break;
                        }
                        conn->c_xmit_rdma_sent = 1;

                }

                if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
                        rm->m_final_op = &rm->atomic;
                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue
                         */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                        ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
                        if (ret) {
                                clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
                                wake_up_interruptible(&rm->m_flush_wait);
                                break;
                        }
                        conn->c_xmit_atomic_sent = 1;

                }

                /*
                 * A number of cases require an RDS header to be sent
                 * even if there is no data.
                 * We permit 0-byte sends; rds-ping depends on this.
                 * However, if there are exclusively attached silent ops,
                 * we skip the hdr/data send, to enable silent operation.
                 */
                if (rm->data.op_nents == 0) {
                        int ops_present;
                        int all_ops_are_silent = 1;

                        ops_present = (rm->atomic.op_active || rm->rdma.op_active);
                        if (rm->atomic.op_active && !rm->atomic.op_silent)
                                all_ops_are_silent = 0;
                        if (rm->rdma.op_active && !rm->rdma.op_silent)
                                all_ops_are_silent = 0;

                        if (ops_present && all_ops_are_silent &&
                            !rm->m_rdma_cookie)
                                rm->data.op_active = 0;
                }

                if (rm->data.op_active && !conn->c_xmit_data_sent) {
                        rm->m_final_op = &rm->data;
                        ret = conn->c_trans->xmit(conn, rm,
                                                  conn->c_xmit_hdr_off,
                                                  conn->c_xmit_sg,
                                                  conn->c_xmit_data_off);
                        if (ret <= 0)
                                break;

                        if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
                                tmp = min_t(int, ret,
                                            sizeof(struct rds_header) -
                                            conn->c_xmit_hdr_off);
                                conn->c_xmit_hdr_off += tmp;
                                ret -= tmp;
                        }

                        sg = &rm->data.op_sg[conn->c_xmit_sg];
                        while (ret) {
                                tmp = min_t(int, ret, sg->length -
                                                      conn->c_xmit_data_off);
                                conn->c_xmit_data_off += tmp;
                                ret -= tmp;
                                if (conn->c_xmit_data_off == sg->length) {
                                        conn->c_xmit_data_off = 0;
                                        sg++;
                                        conn->c_xmit_sg++;
                                        BUG_ON(ret != 0 &&
                                               conn->c_xmit_sg == rm->data.op_nents);
                                }
                        }

                        if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
                            (conn->c_xmit_sg == rm->data.op_nents))
                                conn->c_xmit_data_sent = 1;
                }

                /*
                 * An rm will only take multiple trips through this loop
                 * if there is a data op. Thus, if the data is sent (or there
                 * was none), we're done with the rm.
                 */
                if (!rm->data.op_active || conn->c_xmit_data_sent) {
                        conn->c_xmit_rm = NULL;
                        conn->c_xmit_sg = 0;
                        conn->c_xmit_hdr_off = 0;
                        conn->c_xmit_data_off = 0;
                        conn->c_xmit_rdma_sent = 0;
                        conn->c_xmit_atomic_sent = 0;
                        conn->c_xmit_data_sent = 0;

                        rds_message_put(rm);
                }
        }

over_batch:
        if (conn->c_trans->xmit_complete)
                conn->c_trans->xmit_complete(conn);
        release_in_xmit(conn);

        /* Nuke any messages we decided not to retransmit. */
        if (!list_empty(&to_be_dropped)) {
                /* irqs on here, so we can put(), unlike above */
                list_for_each_entry(rm, &to_be_dropped, m_conn_item)
                        rds_message_put(rm);
                rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
        }

        /*
         * Other senders can queue a message after we last test the send queue
         * but before we clear RDS_IN_XMIT.  In that case they'd back off and
         * not try to send their newly queued message.  We need to check the
         * send queue after having cleared RDS_IN_XMIT so that their message
         * doesn't get stuck on the send queue.
         *
         * If the transport cannot continue (i.e. ret != 0), then it must
         * call us when more room is available, such as from the tx
         * completion handler.
         *
         * We have an extra generation check here so that if someone manages
         * to jump in after our release_in_xmit, we'll see that they have done
         * some work and we will skip our goto.
         */
        if (ret == 0) {
                smp_mb();
                if ((test_bit(0, &conn->c_map_queued) ||
                     !list_empty(&conn->c_send_queue)) &&
                    send_gen == conn->c_send_gen) {
                        rds_stats_inc(s_send_lock_queue_raced);
                        if (batch_count < send_batch_count)
                                goto restart;
                        queue_delayed_work(rds_wq, &conn->c_send_w, 1);
                }
        }
out:
        return ret;
}
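
/*
 * Illustrative sketch (not from this file): when a transport returns a
 * nonzero value from its ->xmit*() hooks because its ring or socket
 * buffer is full, it is responsible for re-driving the send path once
 * space frees up.  A completion handler would typically do something
 * like:
 *
 *	static void example_tx_complete(struct rds_connection *conn)
 *	{
 *		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
 *			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 *	}
 *
 * The function name is hypothetical; RDS_LL_SEND_FULL, rds_wq and
 * c_send_w are the same mechanisms that rds_sendmsg() and
 * rds_send_pong() below check before calling into rds_send_xmit().
 */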

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
        u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        assert_spin_locked(&rs->rs_lock);

        BUG_ON(rs->rs_snd_bytes < len);
        rs->rs_snd_bytes -= len;

        if (rs->rs_snd_bytes == 0)
                rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
                                    is_acked_func is_acked)
{
        if (is_acked)
                return is_acked(rm, ack);
        return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
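
/*
 * Sketch of a transport-supplied is_acked callback (illustrative; it is
 * modeled loosely on what the TCP transport does, since TCP acks byte
 * positions rather than RDS sequence numbers).  Such a callback only
 * trusts m_ack_seq once the flag guarding it has been set:
 *
 *	static int example_is_acked(struct rds_message *rm, uint64_t ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return (__s32)((u32)rm->m_ack_seq - (u32)ack) <= 0;
 *	}
 *
 * Transports that ack by sequence number can instead pass a NULL
 * is_acked and rely on the default h_sequence comparison above.
 */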

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rm_rdma_op *ro;
        struct rds_notifier *notifier;
        unsigned long flags;

        spin_lock_irqsave(&rm->m_rs_lock, flags);

        ro = &rm->rdma;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
            ro->op_active && ro->op_notify && ro->op_notifier) {
                notifier = ro->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ro->op_notifier = NULL;
        }

        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rm_atomic_op *ao;
        struct rds_notifier *notifier;
        unsigned long flags;

        spin_lock_irqsave(&rm->m_rs_lock, flags);

        ao = &rm->atomic;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
            ao->op_active && ao->op_notify && ao->op_notifier) {
                notifier = ao->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ao->op_notifier = NULL;
        }

        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
        struct rm_rdma_op *ro;
        struct rm_atomic_op *ao;

        ro = &rm->rdma;
        if (ro->op_active && ro->op_notify && ro->op_notifier) {
                ro->op_notifier->n_status = status;
                list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
                ro->op_notifier = NULL;
        }

        ao = &rm->atomic;
        if (ao->op_active && ao->op_notify && ao->op_notifier) {
                ao->op_notifier->n_status = status;
                list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
                ao->op_notifier = NULL;
        }

        /* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with a remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
                                         struct rm_rdma_op *op)
{
        struct rds_message *rm, *tmp, *found = NULL;
        unsigned long flags;

        spin_lock_irqsave(&conn->c_lock, flags);

        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (&rm->rdma == op) {
                        atomic_inc(&rm->m_refcount);
                        found = rm;
                        goto out;
                }
        }

        list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
                if (&rm->rdma == op) {
                        atomic_inc(&rm->m_refcount);
                        found = rm;
                        break;
                }
        }

out:
        spin_unlock_irqrestore(&conn->c_lock, flags);

        return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
        unsigned long flags;
        struct rds_sock *rs = NULL;
        struct rds_message *rm;

        while (!list_empty(messages)) {
                int was_on_sock = 0;

                rm = list_entry(messages->next, struct rds_message,
                                m_conn_item);
                list_del_init(&rm->m_conn_item);

                /*
                 * If we see this flag cleared then we're *sure* that someone
                 * else beat us to removing it from the sock.  If we race
                 * with their flag update we'll get the lock and then really
                 * see that the flag has been cleared.
                 *
                 * The message spinlock makes sure nobody clears rm->m_rs
                 * while we're messing with it. It does not prevent the
                 * message from being removed from the socket, though.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);
                if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
                        goto unlock_and_drop;

                if (rs != rm->m_rs) {
                        if (rs) {
                                rds_wake_sk_sleep(rs);
                                sock_put(rds_rs_to_sk(rs));
                        }
                        rs = rm->m_rs;
                        if (rs)
                                sock_hold(rds_rs_to_sk(rs));
                }
                if (!rs)
                        goto unlock_and_drop;
                spin_lock(&rs->rs_lock);

                if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
                        struct rm_rdma_op *ro = &rm->rdma;
                        struct rds_notifier *notifier;

                        list_del_init(&rm->m_sock_item);
                        rds_send_sndbuf_remove(rs, rm);

                        if (ro->op_active && ro->op_notifier &&
                            (ro->op_notify || (ro->op_recverr && status))) {
                                notifier = ro->op_notifier;
                                list_add_tail(&notifier->n_list,
                                              &rs->rs_notify_queue);
                                if (!notifier->n_status)
                                        notifier->n_status = status;
                                rm->rdma.op_notifier = NULL;
                        }
                        was_on_sock = 1;
                        rm->m_rs = NULL;
                }
                spin_unlock(&rs->rs_lock);

unlock_and_drop:
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                rds_message_put(rm);
                if (was_on_sock)
                        rds_message_put(rm);
        }

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
                         is_acked_func is_acked)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&conn->c_lock, flags);

        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (!rds_send_is_acked(rm, ack, is_acked))
                        break;

                list_move(&rm->m_conn_item, &list);
                clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        }

        /* order flag updates with spin locks */
        if (!list_empty(&list))
                smp_mb__after_atomic();

        spin_unlock_irqrestore(&conn->c_lock, flags);

        /* now remove the messages from the sock list as needed */
        rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
        struct rds_message *rm, *tmp;
        struct rds_connection *conn;
        unsigned long flags;
        LIST_HEAD(list);

        /* get all the messages we're dropping under the rs lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
                if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
                             dest->sin_port != rm->m_inc.i_hdr.h_dport))
                        continue;

                list_move(&rm->m_sock_item, &list);
                rds_send_sndbuf_remove(rs, rm);
                clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
        }

        /* order flag updates with the rs lock */
        smp_mb__after_atomic();

        spin_unlock_irqrestore(&rs->rs_lock, flags);

        if (list_empty(&list))
                return;

        /* Remove the messages from the conn */
        list_for_each_entry(rm, &list, m_sock_item) {

                conn = rm->m_inc.i_conn;

                spin_lock_irqsave(&conn->c_lock, flags);
                /*
                 * Maybe someone else beat us to removing rm from the conn.
                 * If we race with their flag update we'll get the lock and
                 * then really see that the flag has been cleared.
                 */
                if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
                        spin_unlock_irqrestore(&conn->c_lock, flags);
                        spin_lock_irqsave(&rm->m_rs_lock, flags);
                        rm->m_rs = NULL;
                        spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                        continue;
                }
                list_del_init(&rm->m_conn_item);
                spin_unlock_irqrestore(&conn->c_lock, flags);

                /*
                 * Couldn't grab m_rs_lock in top loop (lock ordering),
                 * but we can now.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);

                spin_lock(&rs->rs_lock);
                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                spin_unlock(&rs->rs_lock);

                rm->m_rs = NULL;
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);

                rds_message_put(rm);
        }

        rds_wake_sk_sleep(rs);

        while (!list_empty(&list)) {
                rm = list_entry(list.next, struct rds_message, m_sock_item);
                list_del_init(&rm->m_sock_item);
                rds_message_wait(rm);

                /* Just in case the code above skipped this message
                 * because RDS_MSG_ON_CONN wasn't set, run it again here.
                 * Taking m_rs_lock is the only thing that keeps us
                 * from racing with ack processing.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);

                spin_lock(&rs->rs_lock);
                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                spin_unlock(&rs->rs_lock);

                rm->m_rs = NULL;
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);

                rds_message_put(rm);
        }
}

/*
 * We only want this to fire once, so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
                             struct rds_message *rm, __be16 sport,
                             __be16 dport, int *queued)
{
        unsigned long flags;
        u32 len;

        if (*queued)
                goto out;

        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        /* this is the only place which holds both the socket's rs_lock
         * and the connection's c_lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        /*
         * If we required the whole message to fit in the remaining sndbuf
         * space, a nearly full buffer would make us return -EAGAIN while
         * poll() still indicates send room, and userspace could spin if
         * snd_bytes isn't freed up by incoming acks. So we check the *old*
         * value of rs_snd_bytes here, allowing the last message to exceed
         * the buffer; once it does, poll() knows no more data can be sent.
         */
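        /*
         * Worked example (illustrative numbers): with rds_sk_sndbuf(rs) at
         * 64KB and rs_snd_bytes currently at 60KB, a 16KB message is still
         * queued because the *old* value (60KB) is below the limit; the new
         * total of 76KB then makes both this check and poll()'s send-room
         * check fail until acks drain the queue.
         */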
        if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
                rs->rs_snd_bytes += len;

                /* let recv side know we are close to send space exhaustion.
                 * This is probably not the optimal way to do it, as this
                 * means we set the flag on *all* messages as soon as our
                 * throughput hits a certain threshold.
                 */
                if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
                        __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
                set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
                rds_message_addref(rm);
                rm->m_rs = rs;

                /* The code ordering is a little weird, but we're
                 * trying to minimize the time we hold c_lock */
                rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
                rm->m_inc.i_conn = conn;
                rds_message_addref(rm);

                spin_lock(&conn->c_lock);
                rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
                list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
                set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
                spin_unlock(&conn->c_lock);

                rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
                         rm, len, rs, rs->rs_snd_bytes,
                         (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

                *queued = 1;
        }

        spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
        return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
        struct cmsghdr *cmsg;
        int size = 0;
        int cmsg_groups = 0;
        int retval;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        cmsg_groups |= 1;
                        retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
                        if (retval < 0)
                                return retval;
                        size += retval;

                        break;

                case RDS_CMSG_RDMA_DEST:
                case RDS_CMSG_RDMA_MAP:
                        cmsg_groups |= 2;
                        /* these are valid but do not add any size */
                        break;

                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
                case RDS_CMSG_MASKED_ATOMIC_CSWP:
                case RDS_CMSG_MASKED_ATOMIC_FADD:
                        cmsg_groups |= 1;
                        size += sizeof(struct scatterlist);
                        break;

                default:
                        return -EINVAL;
                }

        }

        size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

        /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
        if (cmsg_groups == 3)
                return -EINVAL;

        return size;
}
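
/*
 * Worked example (illustrative): on a 4KB-page system, a sendmsg() of
 * 10KB of payload with one RDS_CMSG_ATOMIC_FADD cmsg attached needs
 * ceil(10240, 4096) = 3 data scatterlist entries plus 1 for the atomic
 * op, so rds_rm_size() returns 4 * sizeof(struct scatterlist) (plus
 * whatever rds_rdma_extra_size() would add for RDMA args, none here).
 */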

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
                         struct msghdr *msg, int *allocated_mr)
{
        struct cmsghdr *cmsg;
        int ret = 0;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                /* As a side effect, RDMA_DEST and RDMA_MAP will set
                 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
                 */
                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        ret = rds_cmsg_rdma_args(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_DEST:
                        ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_MAP:
                        ret = rds_cmsg_rdma_map(rs, rm, cmsg);
                        if (!ret)
                                *allocated_mr = 1;
                        break;

                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
                case RDS_CMSG_MASKED_ATOMIC_CSWP:
                case RDS_CMSG_MASKED_ATOMIC_FADD:
                        ret = rds_cmsg_atomic(rs, rm, cmsg);
                        break;

                default:
                        return -EINVAL;
                }

                if (ret)
                        break;
        }

        return ret;
}
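
/*
 * Userspace sketch (illustrative, not from this file): the control
 * messages parsed above are built with the usual CMSG_*() macros.
 * Assuming the <linux/rds.h> uapi definitions, registering an MR
 * alongside a send might look roughly like:
 *
 *	struct rds_get_mr_args mr_args = {
 *		.vec         = { .addr = (uint64_t)buf, .bytes = len },
 *		.cookie_addr = (uint64_t)&cookie,
 *	};
 *	char ctl[CMSG_SPACE(sizeof(mr_args))];
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control    = ctl;
 *	msg.msg_controllen = sizeof(ctl);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type  = RDS_CMSG_RDMA_MAP;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(mr_args));
 *	memcpy(CMSG_DATA(cmsg), &mr_args, sizeof(mr_args));
 *
 * The exact layout of struct rds_get_mr_args should be taken from the
 * uapi header rather than this sketch.
 */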

int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
        DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
        __be32 daddr;
        __be16 dport;
        struct rds_message *rm = NULL;
        struct rds_connection *conn;
        int ret = 0;
        int queued = 0, allocated_mr = 0;
        int nonblock = msg->msg_flags & MSG_DONTWAIT;
        long timeo = sock_sndtimeo(sk, nonblock);

        /* Mirror Linux UDP's handling of BSD error-message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
        if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (msg->msg_namelen) {
                /* XXX fail non-unicast destination IPs? */
                if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
                        ret = -EINVAL;
                        goto out;
                }
                daddr = usin->sin_addr.s_addr;
                dport = usin->sin_port;
        } else {
                /* We only care about consistency with ->connect() */
                lock_sock(sk);
                daddr = rs->rs_conn_addr;
                dport = rs->rs_conn_port;
                release_sock(sk);
        }

        /* racing with another thread binding seems ok here */
        if (daddr == 0 || rs->rs_bound_addr == 0) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }

        if (payload_len > rds_sk_sndbuf(rs)) {
                ret = -EMSGSIZE;
                goto out;
        }

        /* size of rm including all sgs */
        ret = rds_rm_size(msg, payload_len);
        if (ret < 0)
                goto out;

        rm = rds_message_alloc(ret, GFP_KERNEL);
        if (!rm) {
                ret = -ENOMEM;
                goto out;
        }

        /* Attach data to the rm */
        if (payload_len) {
                rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
                if (!rm->data.op_sg) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = rds_message_copy_from_user(rm, &msg->msg_iter);
                if (ret)
                        goto out;
        }
        rm->data.op_active = 1;

        rm->m_daddr = daddr;

        /* rds_conn_create has a spinlock that runs with IRQ off.
         * Caching the conn in the socket helps a lot. */
        if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
                conn = rs->rs_conn;
        else {
                conn = rds_conn_create_outgoing(sock_net(sock->sk),
                                                rs->rs_bound_addr, daddr,
                                                rs->rs_transport,
                                                sock->sk->sk_allocation);
                if (IS_ERR(conn)) {
                        ret = PTR_ERR(conn);
                        goto out;
                }
                rs->rs_conn = conn;
        }

        /* Parse any control messages the user may have included. */
        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
        if (ret)
                goto out;

        if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
                printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
                                   &rm->rdma, conn->c_trans->xmit_rdma);
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
                printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
                                   &rm->atomic, conn->c_trans->xmit_atomic);
                ret = -EOPNOTSUPP;
                goto out;
        }

        rds_conn_connect_if_down(conn);

        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
        if (ret) {
                rs->rs_seen_congestion = 1;
                goto out;
        }

        while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
                                  dport, &queued)) {
                rds_stats_inc(s_send_queue_full);

                if (nonblock) {
                        ret = -EAGAIN;
                        goto out;
                }

                timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                                        rds_send_queue_rm(rs, conn, rm,
                                                          rs->rs_bound_port,
                                                          dport,
                                                          &queued),
                                        timeo);
                rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
                if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
                        continue;

                ret = timeo;
                if (ret == 0)
                        ret = -ETIMEDOUT;
                goto out;
        }

        /*
         * By now we've committed to the send.  We reuse rds_send_worker()
         * to retry sends in the rds thread if the transport asks us to.
         */
        rds_stats_inc(s_send_queued);

        if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
                rds_send_xmit(conn);

        rds_message_put(rm);
        return payload_len;

out:
        /* If the user included an RDMA_MAP cmsg, we allocated an MR on the
         * fly. If the sendmsg goes through, we keep the MR. If it fails with
         * EAGAIN or in any other way, we need to destroy the MR again */
        if (allocated_mr)
                rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

        if (rm)
                rds_message_put(rm);
        return ret;
}
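
/*
 * Caller-side sketch (illustrative, not from this file): rds_sendmsg()
 * is reached through the ordinary sendmsg() path on an RDS socket.  A
 * minimal nonblocking send that honors the -EAGAIN return above might
 * look roughly like:
 *
 *	int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
 *	bind(fd, (struct sockaddr *)&laddr, sizeof(laddr));
 *	...
 *	msg.msg_name    = &daddr;	// struct sockaddr_in of the peer
 *	msg.msg_namelen = sizeof(daddr);
 *	while (sendmsg(fd, &msg, MSG_DONTWAIT) < 0 && errno == EAGAIN)
 *		poll(&pfd, 1, -1);	// wait for send room
 *
 * Error handling and the iovec setup are elided; a payload_len of 0 is
 * also valid here, which is what rds-ping relies on.
 */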

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
        struct rds_message *rm;
        unsigned long flags;
        int ret = 0;

        rm = rds_message_alloc(0, GFP_ATOMIC);
        if (!rm) {
                ret = -ENOMEM;
                goto out;
        }

        rm->m_daddr = conn->c_faddr;
        rm->data.op_active = 1;

        rds_conn_connect_if_down(conn);

        ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
        if (ret)
                goto out;

        spin_lock_irqsave(&conn->c_lock, flags);
        list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
        set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        rds_message_addref(rm);
        rm->m_inc.i_conn = conn;

        rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
                                    conn->c_next_tx_seq);
        conn->c_next_tx_seq++;
        spin_unlock_irqrestore(&conn->c_lock, flags);

        rds_stats_inc(s_send_queued);
        rds_stats_inc(s_send_pong);

        if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);

        rds_message_put(rm);
        return 0;

out:
        if (rm)
                rds_message_put(rm);
        return ret;
}