]> git.karo-electronics.de Git - karo-tx-linux.git/blob - net/rxrpc/call_object.c
Merge tag 'perf-urgent-for-mingo-4.11-20170317' of git://git.kernel.org/pub/scm/linux...
[karo-tx-linux.git] / net / rxrpc / call_object.c
1 /* RxRPC individual remote procedure call handling
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/circ_buf.h>
17 #include <linux/spinlock_types.h>
18 #include <net/sock.h>
19 #include <net/af_rxrpc.h>
20 #include "ar-internal.h"
21
/* Human-readable names for each call state, indexed by the call state enum.
 * Each string is padded to exactly eight characters so that columnar
 * output (e.g. in /proc) lines up.
 */
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};
37
/* Human-readable names for each call completion reason, indexed by the
 * completion enum; same eight-character padding as rxrpc_call_states.
 */
const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};
45
struct kmem_cache *rxrpc_call_jar;	/* slab cache for struct rxrpc_call */
LIST_HEAD(rxrpc_calls);			/* list of all extant calls */
DEFINE_RWLOCK(rxrpc_call_lock);		/* guards rxrpc_calls */
49
50 static void rxrpc_call_timer_expired(unsigned long _call)
51 {
52         struct rxrpc_call *call = (struct rxrpc_call *)_call;
53
54         _enter("%d", call->debug_id);
55
56         if (call->state < RXRPC_CALL_COMPLETE)
57                 rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
58 }
59
60 /*
61  * find an extant server call
62  * - called in process context with IRQs enabled
63  */
64 struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
65                                               unsigned long user_call_ID)
66 {
67         struct rxrpc_call *call;
68         struct rb_node *p;
69
70         _enter("%p,%lx", rx, user_call_ID);
71
72         read_lock(&rx->call_lock);
73
74         p = rx->calls.rb_node;
75         while (p) {
76                 call = rb_entry(p, struct rxrpc_call, sock_node);
77
78                 if (user_call_ID < call->user_call_ID)
79                         p = p->rb_left;
80                 else if (user_call_ID > call->user_call_ID)
81                         p = p->rb_right;
82                 else
83                         goto found_extant_call;
84         }
85
86         read_unlock(&rx->call_lock);
87         _leave(" = NULL");
88         return NULL;
89
90 found_extant_call:
91         rxrpc_get_call(call, rxrpc_call_got);
92         read_unlock(&rx->call_lock);
93         _leave(" = %p [%d]", call, atomic_read(&call->usage));
94         return call;
95 }
96
97 /*
98  * allocate a new call
99  */
100 struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
101 {
102         struct rxrpc_call *call;
103
104         call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
105         if (!call)
106                 return NULL;
107
108         call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
109                                     sizeof(struct sk_buff *),
110                                     gfp);
111         if (!call->rxtx_buffer)
112                 goto nomem;
113
114         call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
115         if (!call->rxtx_annotations)
116                 goto nomem_2;
117
118         setup_timer(&call->timer, rxrpc_call_timer_expired,
119                     (unsigned long)call);
120         INIT_WORK(&call->processor, &rxrpc_process_call);
121         INIT_LIST_HEAD(&call->link);
122         INIT_LIST_HEAD(&call->chan_wait_link);
123         INIT_LIST_HEAD(&call->accept_link);
124         INIT_LIST_HEAD(&call->recvmsg_link);
125         INIT_LIST_HEAD(&call->sock_link);
126         init_waitqueue_head(&call->waitq);
127         spin_lock_init(&call->lock);
128         rwlock_init(&call->state_lock);
129         atomic_set(&call->usage, 1);
130         call->debug_id = atomic_inc_return(&rxrpc_debug_id);
131
132         memset(&call->sock_node, 0xed, sizeof(call->sock_node));
133
134         /* Leave space in the ring to handle a maxed-out jumbo packet */
135         call->rx_winsize = rxrpc_rx_window_size;
136         call->tx_winsize = 16;
137         call->rx_expect_next = 1;
138
139         if (RXRPC_TX_SMSS > 2190)
140                 call->cong_cwnd = 2;
141         else if (RXRPC_TX_SMSS > 1095)
142                 call->cong_cwnd = 3;
143         else
144                 call->cong_cwnd = 4;
145         call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
146         return call;
147
148 nomem_2:
149         kfree(call->rxtx_buffer);
150 nomem:
151         kmem_cache_free(rxrpc_call_jar, call);
152         return NULL;
153 }
154
155 /*
156  * Allocate a new client call.
157  */
158 static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
159                                                   gfp_t gfp)
160 {
161         struct rxrpc_call *call;
162         ktime_t now;
163
164         _enter("");
165
166         call = rxrpc_alloc_call(gfp);
167         if (!call)
168                 return ERR_PTR(-ENOMEM);
169         call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
170         call->service_id = srx->srx_service;
171         call->tx_phase = true;
172         now = ktime_get_real();
173         call->acks_latest_ts = now;
174         call->cong_tstamp = now;
175
176         _leave(" = %p", call);
177         return call;
178 }
179
180 /*
181  * Initiate the call ack/resend/expiry timer.
182  */
183 static void rxrpc_start_call_timer(struct rxrpc_call *call)
184 {
185         ktime_t now = ktime_get_real(), expire_at;
186
187         expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
188         call->expire_at = expire_at;
189         call->ack_at = expire_at;
190         call->ping_at = expire_at;
191         call->resend_at = expire_at;
192         call->timer.expires = jiffies + LONG_MAX / 2;
193         rxrpc_set_timer(call, rxrpc_timer_begin, now);
194 }
195
/*
 * Set up a new outgoing client call for the given data and publish it under
 * the user-supplied call ID.
 * - called in process context with IRQs enabled
 * - on success, the call is returned with two references held: one for the
 *   user ID binding and one for the caller
 * - returns ERR_PTR(-EEXIST) if the user ID is already in use, or the error
 *   from rxrpc_connect_call() if the connection attempt fails
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	/* Find the insertion point in the socket's user-ID rbtree, bailing
	 * out if the ID is already taken.
	 */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	/* Bind the call to the socket and the user ID; the user-ID binding
	 * carries its own reference on the call.
	 */
	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	/* Also make the call visible on the global list (used by /proc and
	 * module unload).
	 */
	write_lock(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock(&rxrpc_call_lock);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
			 here, NULL);

	/* Hook the call up to the peer's error distribution list so that
	 * network errors get reported to it.
	 */
	spin_lock_bh(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link,
		       &call->conn->params.peer->error_targets);
	spin_unlock_bh(&call->conn->params.peer->lock);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	ret = -EEXIST;

	/* Complete the call, unpublish it and drop both references taken
	 * above so it gets destroyed.
	 */
error:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
			 here, ERR_PTR(ret));
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
294
/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	/* Copy the protocol identifiers out of the triggering packet. */
	rcu_assign_pointer(call->socket, rx);
	call->call_id		= sp->hdr.callNumber;
	call->service_id	= sp->hdr.serviceId;
	call->cid		= sp->hdr.cid;
	call->state		= RXRPC_CALL_SERVER_ACCEPTING;
	/* A secured connection must complete the security handshake before
	 * the call can be accepted.
	 */
	if (sp->hdr.securityIndex > 0)
		call->state	= RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp	= skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	/* Hook the call up to the peer's error distribution list. */
	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}
338
339 /*
340  * Queue a call's work processor, getting a ref to pass to the work queue.
341  */
342 bool rxrpc_queue_call(struct rxrpc_call *call)
343 {
344         const void *here = __builtin_return_address(0);
345         int n = __atomic_add_unless(&call->usage, 1, 0);
346         if (n == 0)
347                 return false;
348         if (rxrpc_queue_work(&call->processor))
349                 trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
350         else
351                 rxrpc_put_call(call, rxrpc_call_put_noqueue);
352         return true;
353 }
354
355 /*
356  * Queue a call's work processor, passing the callers ref to the work queue.
357  */
358 bool __rxrpc_queue_call(struct rxrpc_call *call)
359 {
360         const void *here = __builtin_return_address(0);
361         int n = atomic_read(&call->usage);
362         ASSERTCMP(n, >=, 1);
363         if (rxrpc_queue_work(&call->processor))
364                 trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
365         else
366                 rxrpc_put_call(call, rxrpc_call_put_noqueue);
367         return true;
368 }
369
370 /*
371  * Note the re-emergence of a call.
372  */
373 void rxrpc_see_call(struct rxrpc_call *call)
374 {
375         const void *here = __builtin_return_address(0);
376         if (call) {
377                 int n = atomic_read(&call->usage);
378
379                 trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
380         }
381 }
382
/*
 * Note the addition of a ref on a call, recording the new count and the
 * caller's address in the trace.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, op, n, here, NULL);
}
393
/*
 * Detach a call from its owning socket: unhook it from the recvmsg queue,
 * the user-ID rbtree and the socket's call list, disconnect it from its
 * connection and discard any buffered packets.
 * - the call must already be in the COMPLETE state
 * - drops the refs held by the recvmsg queue and the user-ID binding, but
 *   not the caller's ref
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;
	int i;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	/* Releasing a call twice is a bug. */
	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	del_timer_sync(&call->timer);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;	/* the recvmsg queue held a ref */
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	/* Remove the user-ID binding and drop the ref it held. */
	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn)
		rxrpc_disconnect_call(call);

	/* Discard anything left in the Rx/Tx ring. */
	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));
		call->rxtx_buffer[i] = NULL;
	}

	_leave("");
}
461
462 /*
463  * release all the calls associated with a socket
464  */
465 void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
466 {
467         struct rxrpc_call *call;
468
469         _enter("%p", rx);
470
471         while (!list_empty(&rx->to_be_accepted)) {
472                 call = list_entry(rx->to_be_accepted.next,
473                                   struct rxrpc_call, accept_link);
474                 list_del(&call->accept_link);
475                 rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, ECONNRESET);
476                 rxrpc_put_call(call, rxrpc_call_put);
477         }
478
479         while (!list_empty(&rx->sock_calls)) {
480                 call = list_entry(rx->sock_calls.next,
481                                   struct rxrpc_call, sock_link);
482                 rxrpc_get_call(call, rxrpc_call_got);
483                 rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
484                 rxrpc_send_abort_packet(call);
485                 rxrpc_release_call(rx, call);
486                 rxrpc_put_call(call, rxrpc_call_put);
487         }
488
489         _leave("");
490 }
491
/*
 * Drop a ref on a call.  When the last ref goes, the call is removed from
 * the global call list and handed to rxrpc_cleanup_call() for destruction.
 * - a call must be in the COMPLETE state before its last ref is dropped
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		write_lock(&rxrpc_call_lock);
		list_del_init(&call->link);
		write_unlock(&rxrpc_call_lock);

		rxrpc_cleanup_call(call);
	}
}
516
/*
 * Final call destruction under RCU: drop the peer ref and free the call's
 * buffers and the call record itself once all RCU readers are done.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
}
529
/*
 * Clean up a dead call: discard any remaining packets and schedule the final
 * free for after the RCU grace period.
 * - the call must be complete, released and already disconnected from its
 *   connection
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	int i;

	_net("DESTROY CALL %d", call->debug_id);

	/* Poison the rbtree node so use-after-cleanup is visible. */
	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->conn, ==, NULL);

	/* Clean up the Rx/Tx buffer */
	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));

	rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);

	/* The buffers themselves are freed after the RCU grace period. */
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
557
/*
 * Make sure that all calls are gone at module unload time.  Any call still
 * on the global list at this point is a leak: it is complained about,
 * unlinked and left for its remaining ref holders to clean up.
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");

	/* Unlocked fast path: nothing to do if the list is already empty. */
	if (list_empty(&rxrpc_calls))
		return;

	write_lock(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
		       call, atomic_read(&call->usage),
		       rxrpc_call_states[call->state],
		       call->flags, call->events);

		/* Drop the lock briefly so other CPUs can make progress. */
		write_unlock(&rxrpc_call_lock);
		cond_resched();
		write_lock(&rxrpc_call_lock);
	}

	write_unlock(&rxrpc_call_lock);
}