/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue.
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, it installs a timer that
 *	is run after the packet's timeout has expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
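/*
 * Reading aid (not part of the original header): the steps described
 * above map roughly onto the functions below.
 *
 *	xprt_reserve()		- allocate a request slot, or sleep on the
 *				  backlog queue if none is free
 *	xprt_prepare_transmit()	- serialize write access to the transport
 *	xprt_transmit()		- queue the request on xprt->recv and send it
 *	xprt_lookup_rqst(),
 *	xprt_complete_rqst()	- match an incoming XID to a pending request
 *				  and wake the caller
 *	xprt_timer()		- handle a missing reply after the timeout
 *	xprt_release()		- return the slot to the free list
 */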
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/random.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void	do_xprt_reserve(struct rpc_task *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
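/*
 * Worked example (illustrative, not from the original source): the
 * congestion window is kept in RPC_CWNDSCALE (1 << 8 = 256) units, one
 * unit per request.  With a slot table of max_reqs = 16,
 * RPC_MAXCWND(xprt) = 16 << 8 = 4096, i.e. at most sixteen requests in
 * flight.  Each request that goes on the wire adds RPC_CWNDSCALE to
 * xprt->cong (see __xprt_get_cong below), and the transport counts as
 * congested once xprt->cong >= xprt->cwnd.
 */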
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return 1;

out_sleep:
	dprintk("RPC: %4d failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		schedule_work(&xprt->task_cleanup);
}
/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return;

out_unlock:
	xprt_clear_locked(xprt);
}
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (__xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}
/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %4d xprt_cwnd_limited cong = %ld cwnd = %ld\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}
/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}
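/*
 * Illustrative note (not original text): __xprt_get_cong() and
 * __xprt_put_cong() bracket a request's time on the wire.  With, say,
 * cwnd == 2 * RPC_CWNDSCALE, two requests can each hold RPC_CWNDSCALE
 * of xprt->cong; a third sender finds RPCXPRT_CONGESTED() true and is
 * left sleeping until one of the first two releases its share and
 * __xprt_lock_write_next_cong() wakes the next waiter.
 */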
/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
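/*
 * Worked example (illustrative figures, not original text): with
 * cwnd = 16 * RPC_CWNDSCALE = 4096, a successful reply adds
 * (256 * 256 + 2048) / 4096 = 16 (integer division), i.e. 1/16th of a
 * request, so it takes roughly a full window of replies to grow the
 * window by one request.  A timeout (-ETIMEDOUT) instead halves cwnd,
 * but never below RPC_CWNDSCALE (one request).
 */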
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 */
void xprt_wait_for_buffer_space(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, NULL, NULL);
}
/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on xprt %p\n",
				xprt);
		rpc_wake_up_task(xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
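/*
 * Worked example (illustrative figures only): if rpc_calc_rto() returns
 * an estimate of HZ/4 for this procedure's timer class, and the request
 * has already been retried twice (rq_retries == 2) with no backed-off
 * timeouts recorded (rpc_ntimeo() == 0), the next timeout becomes
 * (HZ/4) << 2 = HZ, capped at the transport's to_maxval.
 */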
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	struct rpc_timeout *to = &req->rq_xprt->timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_timeout *to = &xprt->timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
		pprintk("RPC: %lu retrans\n", jiffies);
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		pprintk("RPC: %lu timeout\n", jiffies);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
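/*
 * Worked example (illustrative figures only): with to_initval = HZ,
 * to_increment = HZ and to_retries = 5 on a non-exponential (UDP-style)
 * timeout, each minor timeout bumps rq_timeout by HZ (1s, 2s, 3s, ...),
 * while xprt_reset_majortimeo() sets rq_majortimeo roughly
 * to_initval + to_increment * to_retries = 6 * HZ into the future.
 * Once that major timeout has passed, rq_timeout is reset to to_initval
 * and -ETIMEDOUT is returned to signal a major timeout to the caller.
 */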
static void xprt_autoclose(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;

	xprt_disconnect(xprt);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
}
/**
 * xprt_disconnect - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	if (xprt_connecting(xprt))
		xprt_release_write(xprt, NULL);
	else
		schedule_work(&xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt->addr.sin_port) {
		task->tk_status = -EIO;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;
	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		if (task->tk_rqstp)
			task->tk_rqstp->rq_bytes_sent = 0;

		task->tk_timeout = xprt->connect_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(task);
	}
	return;
}
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	if (task->tk_status >= 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %4d xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
		dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n",
				task->tk_pid, task->tk_client->cl_server);
		break;
	case -ENOTCONN:
		dprintk("RPC: %4d xprt_connect_status: connection broken\n",
				task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n",
				task->tk_pid, -task->tk_status, task->tk_client->cl_server);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
		return;
	}

	/* if soft mounted, just cause this RPC to fail */
	if (RPC_IS_SOFT(task)) {
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
{
	struct list_head *pos;

	list_for_each(pos, &xprt->recv) {
		struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
		if (entry->rq_xid == xid)
			return entry;
	}
	xprt->stat.bad_xids++;
	return NULL;
}
/**
 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
 * @task: RPC request that recently completed
 *
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned timer = task->tk_msg.rpc_proc->p_timer;

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer,
					(long)jiffies - req->rq_xtime);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	task->tk_xprt->stat.recvs++;
	task->tk_rtt = (long)jiffies - req->rq_xtime;

	list_del_init(&req->rq_list);
	req->rq_received = req->rq_private_buf.len = copied;
	rpc_wake_up_task(task);
}
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %4d xprt_timer\n", task->tk_pid);

	spin_lock(&xprt->transport_lock);
	if (!req->rq_received) {
		if (xprt->ops->timer)
			xprt->ops->timer(task);
		task->tk_status = -ETIMEDOUT;
	}
	task->tk_timeout = 0;
	rpc_wake_up_task(task);
	spin_unlock(&xprt->transport_lock);
}
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_received && !req->rq_bytes_sent) {
		err = req->rq_received;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(task)) {
		err = -EAGAIN;
		goto out_unlock;
	}
	if (!xprt_connected(xprt)) {
		err = -ENOTCONN;
		goto out_unlock;
	}
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}
void
xprt_abort_transmit(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	xprt_release_write(xprt, task);
}
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	smp_rmb();
	if (!req->rq_received) {
		if (list_empty(&req->rq_list)) {
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	status = xprt->ops->send_request(task);
	if (status == 0) {
		dprintk("RPC: %4d xmit complete\n", task->tk_pid);
		spin_lock_bh(&xprt->transport_lock);

		xprt->ops->set_retrans_timeout(task);

		xprt->stat.sends++;
		xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
		xprt->stat.bklog_u += xprt->backlog.qlen;

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (!req->rq_received)
			rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);

		xprt->ops->release_xprt(xprt, task);
		spin_unlock_bh(&xprt->transport_lock);
		return;
	}

	/* Note: at this point, task->tk_sleeping has not yet been set,
	 *	 hence there is no danger of the waking up task being put on
	 *	 schedq, and being picked up by a parallel run of rpciod().
	 */
	task->tk_status = status;

	switch (status) {
	case -ECONNREFUSED:
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	case -EAGAIN:
	case -ENOTCONN:
		return;
	default:
		break;
	}
	xprt_release_write(xprt, task);
}
static inline void do_xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst	*req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC: waiting for request slot\n");
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	do_xprt_reserve(task);
	spin_unlock(&xprt->reserve_lock);
}
static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	get_random_bytes(&xprt->xid, sizeof(xprt->xid));
}
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_timeout = xprt->timeout.to_initval;
	req->rq_task	= task;
	req->rq_xprt	= xprt;
	req->rq_buffer	= NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst	*req;

	if (!(req = task->tk_rqstp))
		return;
	rpc_count_iostats(task);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	xprt->ops->buf_free(task);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);
	memset(req, 0, sizeof(*req));	/* mark unused */

	dprintk("RPC: %4d release request %p\n", task->tk_pid, req);

	spin_lock(&xprt->reserve_lock);
	list_add(&req->rq_list, &xprt->free);
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}
/**
 * xprt_set_timeout - set constant RPC timeout
 * @to: RPC timeout parameters to set up
 * @retr: number of retries
 * @incr: amount of increase after each retry
 *
 */
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
	to->to_initval = to->to_increment = incr;
	to->to_maxval = to->to_initval + (incr * retr);
	to->to_retries = retr;
	to->to_exponential = 0;
}
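/*
 * Usage sketch (illustrative values only): xprt_set_timeout(&to, 5, HZ)
 * yields to_initval = to_increment = HZ, to_retries = 5,
 * to_maxval = HZ + 5 * HZ = 6 * HZ and to_exponential = 0, i.e. a
 * linearly increasing retransmit timeout capped at six seconds.
 */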
static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
{
	int result;
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req;

	if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
		return ERR_PTR(-ENOMEM);
	memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */

	xprt->addr = *ap;

	switch (proto) {
	case IPPROTO_UDP:
		result = xs_setup_udp(xprt, to);
		break;
	case IPPROTO_TCP:
		result = xs_setup_tcp(xprt, to);
		break;
	default:
		printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
				proto);
		result = -EIO;
	}
	if (result) {
		kfree(xprt);
		return ERR_PTR(result);
	}

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
	init_timer(&xprt->timer);
	xprt->timer.function = xprt_init_autodisconnect;
	xprt->timer.data = (unsigned long) xprt;
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;

	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);

	return xprt;
}
/**
 * xprt_create_proto - create an RPC client transport
 * @proto: requested transport protocol
 * @sap: remote peer's address
 * @to: timeout parameters for new transport
 *
 */
struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
{
	struct rpc_xprt	*xprt;

	xprt = xprt_setup(proto, sap, to);
	if (IS_ERR(xprt))
		dprintk("RPC: xprt_create_proto failed\n");
	else
		dprintk("RPC: xprt_create_proto created xprt %p\n", xprt);
	return xprt;
}
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
int xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);
	xprt->ops->destroy(xprt);
	kfree(xprt);

	return 0;
}