 * linux/net/sunrpc/xprtsock.c
 * Client-side transport implementation for sockets.
 * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/file.h>

#include <net/checksum.h>

unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;

unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
 * How many times to try sending a request on a socket before waiting
 * for the socket buffer to clear.
#define XS_SENDMSG_RETRY	(10U)

 * Time out for an RPC UDP socket connect. UDP socket connects are
 * synchronous, but we set a timeout anyway in case of resource
 * exhaustion on the local host.
#define XS_UDP_CONN_TO		(5U * HZ)

 * Wait duration for an RPC TCP connection to be established. Solaris
 * NFS over TCP uses 60 seconds, for example, which is in line with how
 * long a server takes to reboot.
#define XS_TCP_CONN_TO		(60U * HZ)

 * Wait duration for a reply from the RPC portmapper.
#define XS_BIND_TO		(60U * HZ)

 * Delay if a UDP socket connect error occurs. This is most likely some
 * kind of resource problem on the local host.
#define XS_UDP_REEST_TO		(2U * HZ)

 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server. Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
#define XS_TCP_INIT_REEST_TO	(3U * HZ)
#define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)
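
/*
 * Illustrative sketch only (not used elsewhere in this file): how the
 * exponential reconnect backoff behaves.  Starting from XS_TCP_INIT_REEST_TO
 * the delay doubles after each reconnect attempt and is clamped at
 * XS_TCP_MAX_REEST_TO, i.e. 3s, 6s, 12s, ... up to 5 minutes.  The helper
 * name is hypothetical; xs_connect() applies the same arithmetic directly
 * to xprt->reestablish_timeout.
 */
static inline unsigned long xs_example_next_reestablish_timeout(unsigned long timeout)
{
	timeout <<= 1;
	if (timeout > XS_TCP_MAX_REEST_TO)
		timeout = XS_TCP_MAX_REEST_TO;
	return timeout;
}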
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long. Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)

# undef RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
	u8 *buf = (u8 *) packet;

	dprintk("RPC: %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		dprintk("0x%04x ", j);
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);

static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)

struct sock_xprt {
	struct rpc_xprt		xprt;
	struct socket		*sock;
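	/*
	 * Note added for clarity: because the generic rpc_xprt is embedded
	 * here, the transport methods below recover the containing sock_xprt
	 * with container_of(xprt, struct sock_xprt, xprt).
	 */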
static void xs_format_peer_addresses(struct rpc_xprt *xprt)
	struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;

	buf = kzalloc(20, GFP_KERNEL);
		snprintf(buf, 20, "%u.%u.%u.%u",
			NIPQUAD(addr->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_ADDR] = buf;

	buf = kzalloc(8, GFP_KERNEL);
		snprintf(buf, 8, "%u",
			ntohs(addr->sin_port));
	xprt->address_strings[RPC_DISPLAY_PORT] = buf;

	if (xprt->prot == IPPROTO_UDP)
		xprt->address_strings[RPC_DISPLAY_PROTO] = "udp";
	else
		xprt->address_strings[RPC_DISPLAY_PROTO] = "tcp";

	buf = kzalloc(48, GFP_KERNEL);
		snprintf(buf, 48, "addr=%u.%u.%u.%u port=%u proto=%s",
			NIPQUAD(addr->sin_addr.s_addr),
			ntohs(addr->sin_port),
			xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
	xprt->address_strings[RPC_DISPLAY_ALL] = buf;

static void xs_free_peer_addresses(struct rpc_xprt *xprt)
	kfree(xprt->address_strings[RPC_DISPLAY_ADDR]);
	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
	kfree(xprt->address_strings[RPC_DISPLAY_ALL]);

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
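/*
 * Note added for clarity: MSG_DONTWAIT keeps these kernel-space sends from
 * blocking, and MSG_NOSIGNAL suppresses SIGPIPE if the peer has already
 * closed the connection; MSG_MORE is OR-ed in below whenever further data
 * for the same request is still to come.
 */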
static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
	struct msghdr msg = {
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
		.iov_base	= vec->iov_base + base,
		.iov_len	= vec->iov_len - base,

	if (iov.iov_len != 0)
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);

static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
	unsigned int remainder;

	remainder = xdr->page_len - base;
	base += xdr->page_base;
	ppage = xdr->pages + (base >> PAGE_SHIFT);
		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
		int flags = XS_SENDMSG_FLAGS;

		if (remainder != 0 || more)
			flags |= MSG_MORE;
		err = sock->ops->sendpage(sock, *ppage, base, len, flags);
		if (remainder == 0 || err != len)

 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
	unsigned int remainder = xdr->len - base;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);

	if (base < xdr->head[0].iov_len || addr != NULL) {
		unsigned int len = xdr->head[0].iov_len - base;
		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
		if (remainder == 0 || err != len)
		base -= xdr->head[0].iov_len;

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;
		err = xs_send_pagedata(sock, xdr, base, remainder != 0);
		if (remainder == 0 || err != len)
		base -= xdr->page_len;

	if (base >= xdr->tail[0].iov_len)
	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
static void xs_nospace(struct rpc_task *task)
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,

	if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
		/* Protect against races with write_space */
		spin_lock_bh(&xprt->transport_lock);

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
			xprt_wait_for_buffer_space(task);

		spin_unlock_bh(&xprt->transport_lock);
	} else
		/* Keep holding the socket if it is blocked */
		rpc_delay(task, HZ>>4);

 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
static int xs_udp_send_request(struct rpc_task *task)
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;

	xs_pktdump("packet data:",
			req->rq_svec->iov_base,
			req->rq_svec->iov_len);

	req->rq_xtime = jiffies;
	status = xs_sendpages(transport->sock,
			(struct sockaddr *) &xprt->addr,

	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

	if (likely(status >= (int) req->rq_slen))

	/* Still some bytes left; set up for a retry later. */

	/* When the server has died, an ICMP port unreachable message
	 * prompts ECONNREFUSED. */
		dprintk("RPC: sendmsg returned unrecognized error %d\n",

static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;

	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
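
/*
 * Illustrative sketch only: the record marker written above is a single
 * 32-bit big-endian word whose top bit flags the last fragment of an RPC
 * record and whose low 31 bits carry the fragment length, so a 100-byte
 * final fragment is encoded as 0x80000064.  These hypothetical helpers show
 * the decode direction; the receive path open-codes the same masking in
 * xs_tcp_read_fraghdr().
 */
static inline u32 xs_example_frag_length(rpc_fraghdr marker)
{
	return ntohl(marker) & RPC_FRAGMENT_SIZE_MASK;
}

static inline int xs_example_frag_is_last(rpc_fraghdr marker)
{
	return (ntohl(marker) & RPC_LAST_STREAM_FRAGMENT) != 0;
}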
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
static int xs_tcp_send_request(struct rpc_task *task)
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status, retry = 0;

	xs_encode_tcp_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
			req->rq_svec->iov_base,
			req->rq_svec->iov_len);

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
		req->rq_xtime = jiffies;
		status = xs_sendpages(transport->sock,
				NULL, 0, xdr, req->rq_bytes_sent);

		dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
				xdr->len - req->rq_bytes_sent, status);

		if (unlikely(status < 0))

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += status;
		task->tk_bytes_sent += status;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;

		if (retry++ > XS_SENDMSG_RETRY)

		dprintk("RPC: sendmsg returned unrecognized error %d\n",
		xprt_disconnect(xprt);
 * xs_tcp_release_xprt - clean up after a tcp transmission
 *
 * This cleans up if an error causes us to abort the transmission of a request.
 * In this case, the socket may need to be reset in order to avoid confusing
 * the server.
static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
	struct rpc_rqst *req;

	if (task != xprt->snd_task)
	req = task->tk_rqstp;
	if (req->rq_bytes_sent == 0)
	if (req->rq_bytes_sent == req->rq_snd_buf.len)
	set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
	xprt_release_xprt(xprt, task);

 * xs_close - close a socket
 *
 * This is used when all requests are complete; i.e., no DRC state remains
 * on the server we want to save.
static void xs_close(struct rpc_xprt *xprt)
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;
	struct sock *sk = transport->inet;

		goto clear_close_wait;

	dprintk("RPC: xs_close xprt %p\n", xprt);

	write_lock_bh(&sk->sk_callback_lock);
	transport->inet = NULL;
	transport->sock = NULL;

	sk->sk_user_data = NULL;
	sk->sk_data_ready = xprt->old_data_ready;
	sk->sk_state_change = xprt->old_state_change;
	sk->sk_write_space = xprt->old_write_space;
	write_unlock_bh(&sk->sk_callback_lock);

	smp_mb__before_clear_bit();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	smp_mb__after_clear_bit();

 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
static void xs_destroy(struct rpc_xprt *xprt)
	dprintk("RPC: xs_destroy xprt %p\n", xprt);

	cancel_delayed_work(&xprt->connect_worker);
	flush_scheduled_work();

	xprt_disconnect(xprt);
	xs_free_peer_addresses(xprt);

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
	return (struct rpc_xprt *) sk->sk_user_data;
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
static void xs_udp_data_ready(struct sock *sk, int len)
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	int err, repsize, copied;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC: xs_udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)

	repsize = skb->len - sizeof(struct udphdr);
		dprintk("RPC: impossible RPC reply size %d!\n", repsize);

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(struct udphdr),
				sizeof(_xid), &_xid);

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	task = rovr->rq_task;

	if ((copied = rovr->rq_private_buf.buflen) > repsize)

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))

	/* Something worked... */
	dst_confirm(skb->dst);

	xprt_adjust_cwnd(task, copied);
	xprt_update_rtt(task);
	xprt_complete_rqst(task, copied);

	spin_unlock(&xprt->transport_lock);
	skb_free_datagram(sk, skb);
	read_unlock(&sk->sk_callback_lock);
static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
	if (len > desc->count)
	if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
		dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n",
	dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n",

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
	p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
	len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;

	xprt->tcp_reclen = ntohl(xprt->tcp_recm);
	if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
		xprt->tcp_flags |= XPRT_LAST_FRAG;
	else
		xprt->tcp_flags &= ~XPRT_LAST_FRAG;
	xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
	xprt->tcp_flags &= ~XPRT_COPY_RECM;
	xprt->tcp_offset = 0;

	/* Sanity check of the record length */
	if (unlikely(xprt->tcp_reclen < 4)) {
		dprintk("RPC: invalid TCP record fragment length\n");
		xprt_disconnect(xprt);
	dprintk("RPC: reading TCP record fragment of length %d\n",
static void xs_tcp_check_recm(struct rpc_xprt *xprt)
	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
	if (xprt->tcp_offset == xprt->tcp_reclen) {
		xprt->tcp_flags |= XPRT_COPY_RECM;
		xprt->tcp_offset = 0;
		if (xprt->tcp_flags & XPRT_LAST_FRAG) {
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
			xprt->tcp_flags |= XPRT_COPY_XID;
			xprt->tcp_copied = 0;

static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
	len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
	dprintk("RPC: reading XID (%Zu bytes)\n", len);
	p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;

	xprt->tcp_flags &= ~XPRT_COPY_XID;
	xprt->tcp_flags |= XPRT_COPY_DATA;
	xprt->tcp_copied = 4;
	dprintk("RPC: reading reply for XID %08x\n",
			ntohl(xprt->tcp_xid));
	xs_tcp_check_recm(xprt);
static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
	struct rpc_rqst *req;
	struct xdr_buf *rcvbuf;

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC: XID %08x request not found!\n",
				ntohl(xprt->tcp_xid));
		spin_unlock(&xprt->transport_lock);

	rcvbuf = &req->rq_private_buf;
	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
		skb_reader_t my_desc;

		len = xprt->tcp_reclen - xprt->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					      &my_desc, xs_tcp_copy_data);
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					      desc, xs_tcp_copy_data);

	xprt->tcp_copied += r;
	xprt->tcp_offset += r;

		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off XPRT_COPY_DATA, so the request
		 * will not receive any additional updates.
		 * Any remaining data from this record will
		 * be discarded. */
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC: XID %08x truncated request\n",
				ntohl(xprt->tcp_xid));
		dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
				xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);

	dprintk("RPC: XID %08x read %Zd bytes\n",
			ntohl(xprt->tcp_xid), r);
	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);

	if (xprt->tcp_copied == req->rq_private_buf.buflen)
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
	else if (xprt->tcp_offset == xprt->tcp_reclen) {
		if (xprt->tcp_flags & XPRT_LAST_FRAG)
			xprt->tcp_flags &= ~XPRT_COPY_DATA;

	if (!(xprt->tcp_flags & XPRT_COPY_DATA))
		xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
	spin_unlock(&xprt->transport_lock);
	xs_tcp_check_recm(xprt);
static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
	len = xprt->tcp_reclen - xprt->tcp_offset;
	if (len > desc->count)
	xprt->tcp_offset += len;
	dprintk("RPC: discarded %Zu bytes\n", len);
	xs_tcp_check_recm(xprt);

static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
	struct rpc_xprt *xprt = rd_desc->arg.data;
	skb_reader_t desc = {

	dprintk("RPC: xs_tcp_data_recv started\n");
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (xprt->tcp_flags & XPRT_COPY_RECM) {
			xs_tcp_read_fraghdr(xprt, &desc);
		/* Read in the xid if necessary */
		if (xprt->tcp_flags & XPRT_COPY_XID) {
			xs_tcp_read_xid(xprt, &desc);
		/* Read in the request data */
		if (xprt->tcp_flags & XPRT_COPY_DATA) {
			xs_tcp_read_request(xprt, &desc);
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(xprt, &desc);
	} while (desc.count);
	dprintk("RPC: xs_tcp_data_recv done\n");
	return len - desc.count;
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
static void xs_tcp_data_ready(struct sock *sk, int bytes)
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC: xs_tcp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	rd_desc.arg.data = xprt;
	rd_desc.count = 65536;
	tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
	read_unlock(&sk->sk_callback_lock);

 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
static void xs_tcp_state_change(struct sock *sk)
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC: state %x conn %d dead %d zapped %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED));

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock_bh(&xprt->transport_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			/* Reset TCP record info */
			xprt->tcp_offset = 0;
			xprt->tcp_reclen = 0;
			xprt->tcp_copied = 0;
			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
			xprt_wake_pending_tasks(xprt, 0);
		spin_unlock_bh(&xprt->transport_lock);

		/* Try to schedule an autoclose RPC call */
		set_bit(XPRT_CLOSE_WAIT, &xprt->state);
		if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
			schedule_work(&xprt->task_cleanup);
		xprt_disconnect(xprt);
	read_unlock(&sk->sk_callback_lock);
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
static void xs_udp_write_space(struct sock *sk)
	read_lock(&sk->sk_callback_lock);

	/* from net/core/sock.c:sock_def_write_space */
	if (sock_writeable(sk)) {
		struct rpc_xprt *xprt;

		if (unlikely(!(sock = sk->sk_socket)))
		if (unlikely(!(xprt = xprt_from_sock(sk))))
		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))

		xprt_write_space(xprt);

	read_unlock(&sk->sk_callback_lock);
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
static void xs_tcp_write_space(struct sock *sk)
	read_lock(&sk->sk_callback_lock);

	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
		struct rpc_xprt *xprt;

		if (unlikely(!(sock = sk->sk_socket)))
		if (unlikely(!(xprt = xprt_from_sock(sk))))
		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))

		xprt_write_space(xprt);

	read_unlock(&sk->sk_callback_lock);
static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;

		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;

		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);

 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
		xprt->sndsize = sndsize + 1024;
		xprt->rcvsize = rcvsize + 1024;
	xs_udp_do_set_buffer_size(xprt);

 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
static void xs_udp_timer(struct rpc_task *task)
	xprt_adjust_cwnd(task, -ETIMEDOUT);
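
/*
 * Note added for clarity: this picks a pseudo-random source port in the
 * reserved range [xprt_min_resvport, xprt_max_resvport); xs_bindresvport()
 * below uses it as the starting point and walks downward, wrapping back to
 * xprt_max_resvport, if the port is already in use.
 */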
static unsigned short xs_get_random_port(void)
	unsigned short range = xprt_max_resvport - xprt_min_resvport;
	unsigned short rand = (unsigned short) net_random() % range;
	return rand + xprt_min_resvport;

 * xs_print_peer_address - format an IPv4 address for printing
 * @xprt: generic transport
 * @format: flags field indicating which parts of the address to render
static char *xs_print_peer_address(struct rpc_xprt *xprt, enum rpc_display_format_t format)
	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	return "unprintable";
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
	struct sockaddr_in *sap = (struct sockaddr_in *) &xprt->addr;

	dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
	sap->sin_port = htons(port);

static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
	struct sockaddr_in myaddr = {
		.sin_family = AF_INET,
	unsigned short port = xprt->port;

		myaddr.sin_port = htons(port);
		err = kernel_bind(sock, (struct sockaddr *) &myaddr,
			dprintk("RPC: xs_bindresvport bound to port %u\n",
		if (port <= xprt_min_resvport)
			port = xprt_max_resvport;
	} while (err == -EADDRINUSE && port != xprt->port);

	dprintk("RPC: can't bind to reserved port (%d).\n", -err);
 * xs_udp_connect_worker - set up a UDP socket
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
static void xs_udp_connect_worker(void *args)
	struct rpc_xprt *xprt = (struct rpc_xprt *) args;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;
	int err, status = -EIO;

	if (xprt->shutdown || !xprt_bound(xprt))

	/* Start by resetting any existing state */
	if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
		dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
	if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {

	dprintk("RPC: worker connecting xprt %p to address: %s\n",
			xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL));

	if (!transport->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		sk->sk_user_data = xprt;
		xprt->old_data_ready = sk->sk_data_ready;
		xprt->old_state_change = sk->sk_state_change;
		xprt->old_write_space = sk->sk_write_space;
		sk->sk_data_ready = xs_udp_data_ready;
		sk->sk_write_space = xs_udp_write_space;
		sk->sk_no_check = UDP_CSUM_NORCV;
		sk->sk_allocation = GFP_ATOMIC;

		xprt_set_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	xs_udp_do_set_buffer_size(xprt);

	xprt_wake_pending_tasks(xprt, status);
	xprt_clear_connecting(xprt);
 * We need to preserve the port number so the reply cache on the server can
 * find our cached RPC replies when we get around to reconnecting.
static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sockaddr any;

	dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt);

	 * Disconnect the transport socket by doing a connect operation
	 * with AF_UNSPEC. This should return immediately...
	memset(&any, 0, sizeof(any));
	any.sa_family = AF_UNSPEC;
	result = kernel_connect(transport->sock, &any, sizeof(any), 0);
		dprintk("RPC: AF_UNSPEC connect return code %d\n",
 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
static void xs_tcp_connect_worker(void *args)
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;
	int err, status = -EIO;

	if (xprt->shutdown || !xprt_bound(xprt))

		/* start from scratch */
		if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
			dprintk("RPC: can't create TCP transport socket (%d).\n", -err);
		if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
		/* "close" the socket, preserving the local port */
		xs_tcp_reuse_connection(xprt);

	dprintk("RPC: worker connecting xprt %p to address: %s\n",
			xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL));

	if (!transport->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		sk->sk_user_data = xprt;
		xprt->old_data_ready = sk->sk_data_ready;
		xprt->old_state_change = sk->sk_state_change;
		xprt->old_write_space = sk->sk_write_space;
		sk->sk_data_ready = xs_tcp_data_ready;
		sk->sk_state_change = xs_tcp_state_change;
		sk->sk_write_space = xs_tcp_write_space;
		sk->sk_allocation = GFP_ATOMIC;

		/* socket options */
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
		sock_reset_flag(sk, SOCK_LINGER);
		tcp_sk(sk)->linger2 = 0;
		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;

		xprt_clear_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);

	/* Tell the socket layer to start connecting... */
	xprt->stat.connect_count++;
	xprt->stat.connect_start = jiffies;
	status = kernel_connect(sock, (struct sockaddr *) &xprt->addr,
			xprt->addrlen, O_NONBLOCK);
	dprintk("RPC: %p connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
		/* retry with existing socket, after a delay */
		/* get rid of existing socket, and retry */

	xprt_wake_pending_tasks(xprt, status);
	xprt_clear_connecting(xprt);
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 *
 * UDP socket connects are synchronous, but we use a work queue anyway
 * to guarantee that even unprivileged user processes can set up a
 * socket on a privileged port.
 *
 * If a UDP socket connect fails, the delay behavior here prevents
 * retry floods (hard mounts).
static void xs_connect(struct rpc_task *task)
	struct rpc_xprt *xprt = task->tk_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	if (xprt_test_and_set_connecting(xprt))

	if (transport->sock != NULL) {
		dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n",
				xprt, xprt->reestablish_timeout / HZ);
		schedule_delayed_work(&xprt->connect_worker,
				xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
		schedule_work(&xprt->connect_worker);

		/* flush_scheduled_work can sleep... */
		if (!RPC_IS_ASYNC(task))
			flush_scheduled_work();
 * xs_udp_print_stats - display UDP socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
			xprt->stat.bind_count,
			xprt->stat.bad_xids,
			xprt->stat.bklog_u);
 * xs_tcp_print_stats - display TCP socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
			xprt->stat.bind_count,
			xprt->stat.connect_count,
			xprt->stat.connect_time,
			xprt->stat.bad_xids,
			xprt->stat.bklog_u);
static struct rpc_xprt_ops xs_udp_ops = {
	.set_buffer_size	= xs_udp_set_buffer_size,
	.print_addr		= xs_print_peer_address,
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.rpcbind		= rpc_getport,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_udp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
	.timer			= xs_udp_timer,
	.release_request	= xprt_release_rqst_cong,
	.destroy		= xs_destroy,
	.print_stats		= xs_udp_print_stats,

static struct rpc_xprt_ops xs_tcp_ops = {
	.print_addr		= xs_print_peer_address,
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xs_tcp_release_xprt,
	.rpcbind		= rpc_getport,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_tcp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.destroy		= xs_destroy,
	.print_stats		= xs_tcp_print_stats,
static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, unsigned int slot_table_size)
	struct rpc_xprt *xprt;
	struct sock_xprt *new;

	if (addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: xs_setup_xprt: address too large\n");
		return ERR_PTR(-EBADF);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
		dprintk("RPC: xs_setup_xprt: couldn't allocate rpc_xprt\n");
		return ERR_PTR(-ENOMEM);

	xprt->max_reqs = slot_table_size;
	xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
	if (xprt->slot == NULL) {
		dprintk("RPC: xs_setup_xprt: couldn't allocate slot table\n");
		return ERR_PTR(-ENOMEM);

	memcpy(&xprt->addr, addr, addrlen);
	xprt->addrlen = addrlen;
	xprt->port = xs_get_random_port();
 * xs_setup_udp - Set up transport to use a UDP socket
 * @addr: address of remote server
 * @addrlen: length of address in bytes
 * @to: timeout parameters
struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to)
	struct rpc_xprt *xprt;

	xprt = xs_setup_xprt(addr, addrlen, xprt_udp_slot_table_entries);

	if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0)
		xprt_set_bound(xprt);

	xprt->prot = IPPROTO_UDP;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
	xprt->bind_timeout = XS_BIND_TO;
	xprt->connect_timeout = XS_UDP_CONN_TO;
	xprt->reestablish_timeout = XS_UDP_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_udp_ops;

		xprt->timeout = *to;
		xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);

	xs_format_peer_addresses(xprt);
	dprintk("RPC: set up transport to address %s\n",
			xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @addr: address of remote server
 * @addrlen: length of address in bytes
 * @to: timeout parameters
struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to)
	struct rpc_xprt *xprt;

	xprt = xs_setup_xprt(addr, addrlen, xprt_tcp_slot_table_entries);

	if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0)
		xprt_set_bound(xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
	xprt->bind_timeout = XS_BIND_TO;
	xprt->connect_timeout = XS_TCP_CONN_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_tcp_ops;

		xprt->timeout = *to;
		xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);

	xs_format_peer_addresses(xprt);
	dprintk("RPC: set up transport to address %s\n",
			xs_print_peer_address(xprt, RPC_DISPLAY_ALL));