/*
 * common code for virtio vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
10 #include <linux/spinlock.h>
11 #include <linux/module.h>
12 #include <linux/sched/signal.h>
13 #include <linux/ctype.h>
14 #include <linux/list.h>
15 #include <linux/virtio.h>
16 #include <linux/virtio_ids.h>
17 #include <linux/virtio_config.h>
18 #include <linux/virtio_vsock.h>
21 #include <net/af_vsock.h>
23 #define CREATE_TRACE_POINTS
24 #include <trace/events/vsock_virtio_transport_common.h>
26 /* How long to wait for graceful shutdown of a connection */
27 #define VSOCK_CLOSE_TIMEOUT (8 * HZ)
29 static const struct virtio_transport *virtio_transport_get_ops(void)
31 const struct vsock_transport *t = vsock_core_get_transport();
33 return container_of(t, struct virtio_transport, transport);
36 static struct virtio_vsock_pkt *
37 virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
44 struct virtio_vsock_pkt *pkt;
47 pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
51 pkt->hdr.type = cpu_to_le16(info->type);
52 pkt->hdr.op = cpu_to_le16(info->op);
53 pkt->hdr.src_cid = cpu_to_le64(src_cid);
54 pkt->hdr.dst_cid = cpu_to_le64(dst_cid);
55 pkt->hdr.src_port = cpu_to_le32(src_port);
56 pkt->hdr.dst_port = cpu_to_le32(dst_port);
57 pkt->hdr.flags = cpu_to_le32(info->flags);
59 pkt->hdr.len = cpu_to_le32(len);
60 pkt->reply = info->reply;
63 if (info->msg && len > 0) {
64 pkt->buf = kmalloc(len, GFP_KERNEL);
67 err = memcpy_from_msg(pkt->buf, info->msg, len);
72 trace_virtio_transport_alloc_pkt(src_cid, src_port,
88 static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
89 struct virtio_vsock_pkt_info *info)
91 u32 src_cid, src_port, dst_cid, dst_port;
92 struct virtio_vsock_sock *vvs;
93 struct virtio_vsock_pkt *pkt;
94 u32 pkt_len = info->pkt_len;
96 src_cid = vm_sockets_get_local_cid();
97 src_port = vsk->local_addr.svm_port;
98 if (!info->remote_cid) {
99 dst_cid = vsk->remote_addr.svm_cid;
100 dst_port = vsk->remote_addr.svm_port;
102 dst_cid = info->remote_cid;
103 dst_port = info->remote_port;
108 /* we can send less than pkt_len bytes */
109 if (pkt_len > VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE)
110 pkt_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
112 /* virtio_transport_get_credit might return less than pkt_len credit */
113 pkt_len = virtio_transport_get_credit(vvs, pkt_len);
115 /* Do not send zero length OP_RW pkt */
116 if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
119 pkt = virtio_transport_alloc_pkt(info, pkt_len,
123 virtio_transport_put_credit(vvs, pkt_len);
127 virtio_transport_inc_tx_pkt(vvs, pkt);
129 return virtio_transport_get_ops()->send_pkt(pkt);
132 static void virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
133 struct virtio_vsock_pkt *pkt)
135 vvs->rx_bytes += pkt->len;
138 static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
139 struct virtio_vsock_pkt *pkt)
141 vvs->rx_bytes -= pkt->len;
142 vvs->fwd_cnt += pkt->len;
145 void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
147 spin_lock_bh(&vvs->tx_lock);
148 pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
149 pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
150 spin_unlock_bh(&vvs->tx_lock);
152 EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
154 u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
158 spin_lock_bh(&vvs->tx_lock);
159 ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
163 spin_unlock_bh(&vvs->tx_lock);
167 EXPORT_SYMBOL_GPL(virtio_transport_get_credit);
169 void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
171 spin_lock_bh(&vvs->tx_lock);
172 vvs->tx_cnt -= credit;
173 spin_unlock_bh(&vvs->tx_lock);
175 EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
177 static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
179 struct virtio_vsock_hdr *hdr)
181 struct virtio_vsock_pkt_info info = {
182 .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
187 return virtio_transport_send_pkt_info(vsk, &info);
191 virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
195 struct virtio_vsock_sock *vvs = vsk->trans;
196 struct virtio_vsock_pkt *pkt;
197 size_t bytes, total = 0;
200 spin_lock_bh(&vvs->rx_lock);
201 while (total < len && !list_empty(&vvs->rx_queue)) {
202 pkt = list_first_entry(&vvs->rx_queue,
203 struct virtio_vsock_pkt, list);
206 if (bytes > pkt->len - pkt->off)
207 bytes = pkt->len - pkt->off;
209 /* sk_lock is held by caller so no one else can dequeue.
210 * Unlock rx_lock since memcpy_to_msg() may sleep.
212 spin_unlock_bh(&vvs->rx_lock);
214 err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
218 spin_lock_bh(&vvs->rx_lock);
222 if (pkt->off == pkt->len) {
223 virtio_transport_dec_rx_pkt(vvs, pkt);
224 list_del(&pkt->list);
225 virtio_transport_free_pkt(pkt);
228 spin_unlock_bh(&vvs->rx_lock);
230 /* Send a credit pkt to peer */
231 virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
243 virtio_transport_stream_dequeue(struct vsock_sock *vsk,
245 size_t len, int flags)
247 if (flags & MSG_PEEK)
250 return virtio_transport_stream_do_dequeue(vsk, msg, len);
252 EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
255 virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
257 size_t len, int flags)
261 EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);
263 s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
265 struct virtio_vsock_sock *vvs = vsk->trans;
268 spin_lock_bh(&vvs->rx_lock);
269 bytes = vvs->rx_bytes;
270 spin_unlock_bh(&vvs->rx_lock);
274 EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);
276 static s64 virtio_transport_has_space(struct vsock_sock *vsk)
278 struct virtio_vsock_sock *vvs = vsk->trans;
281 bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
288 s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
290 struct virtio_vsock_sock *vvs = vsk->trans;
293 spin_lock_bh(&vvs->tx_lock);
294 bytes = virtio_transport_has_space(vsk);
295 spin_unlock_bh(&vvs->tx_lock);
299 EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);
301 int virtio_transport_do_socket_init(struct vsock_sock *vsk,
302 struct vsock_sock *psk)
304 struct virtio_vsock_sock *vvs;
306 vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
313 struct virtio_vsock_sock *ptrans = psk->trans;
315 vvs->buf_size = ptrans->buf_size;
316 vvs->buf_size_min = ptrans->buf_size_min;
317 vvs->buf_size_max = ptrans->buf_size_max;
318 vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
320 vvs->buf_size = VIRTIO_VSOCK_DEFAULT_BUF_SIZE;
321 vvs->buf_size_min = VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE;
322 vvs->buf_size_max = VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE;
325 vvs->buf_alloc = vvs->buf_size;
327 spin_lock_init(&vvs->rx_lock);
328 spin_lock_init(&vvs->tx_lock);
329 INIT_LIST_HEAD(&vvs->rx_queue);
333 EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
335 u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk)
337 struct virtio_vsock_sock *vvs = vsk->trans;
339 return vvs->buf_size;
341 EXPORT_SYMBOL_GPL(virtio_transport_get_buffer_size);
343 u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk)
345 struct virtio_vsock_sock *vvs = vsk->trans;
347 return vvs->buf_size_min;
349 EXPORT_SYMBOL_GPL(virtio_transport_get_min_buffer_size);
351 u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk)
353 struct virtio_vsock_sock *vvs = vsk->trans;
355 return vvs->buf_size_max;
357 EXPORT_SYMBOL_GPL(virtio_transport_get_max_buffer_size);
359 void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
361 struct virtio_vsock_sock *vvs = vsk->trans;
363 if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
364 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
365 if (val < vvs->buf_size_min)
366 vvs->buf_size_min = val;
367 if (val > vvs->buf_size_max)
368 vvs->buf_size_max = val;
370 vvs->buf_alloc = val;
372 EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
374 void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
376 struct virtio_vsock_sock *vvs = vsk->trans;
378 if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
379 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
380 if (val > vvs->buf_size)
382 vvs->buf_size_min = val;
384 EXPORT_SYMBOL_GPL(virtio_transport_set_min_buffer_size);
386 void virtio_transport_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
388 struct virtio_vsock_sock *vvs = vsk->trans;
390 if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
391 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
392 if (val < vvs->buf_size)
394 vvs->buf_size_max = val;
396 EXPORT_SYMBOL_GPL(virtio_transport_set_max_buffer_size);
399 virtio_transport_notify_poll_in(struct vsock_sock *vsk,
401 bool *data_ready_now)
403 if (vsock_stream_has_data(vsk))
404 *data_ready_now = true;
406 *data_ready_now = false;
410 EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);
413 virtio_transport_notify_poll_out(struct vsock_sock *vsk,
415 bool *space_avail_now)
419 free_space = vsock_stream_has_space(vsk);
421 *space_avail_now = true;
422 else if (free_space == 0)
423 *space_avail_now = false;
427 EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);
429 int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
430 size_t target, struct vsock_transport_recv_notify_data *data)
434 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);
436 int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
437 size_t target, struct vsock_transport_recv_notify_data *data)
441 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);
443 int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
444 size_t target, struct vsock_transport_recv_notify_data *data)
448 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);
450 int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
451 size_t target, ssize_t copied, bool data_read,
452 struct vsock_transport_recv_notify_data *data)
456 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);
458 int virtio_transport_notify_send_init(struct vsock_sock *vsk,
459 struct vsock_transport_send_notify_data *data)
463 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);
465 int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
466 struct vsock_transport_send_notify_data *data)
470 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);
472 int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
473 struct vsock_transport_send_notify_data *data)
477 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);
479 int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
480 ssize_t written, struct vsock_transport_send_notify_data *data)
484 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
486 u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
488 struct virtio_vsock_sock *vvs = vsk->trans;
490 return vvs->buf_size;
492 EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
494 bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
498 EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);
500 bool virtio_transport_stream_allow(u32 cid, u32 port)
504 EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);
506 int virtio_transport_dgram_bind(struct vsock_sock *vsk,
507 struct sockaddr_vm *addr)
511 EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);
513 bool virtio_transport_dgram_allow(u32 cid, u32 port)
517 EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);
519 int virtio_transport_connect(struct vsock_sock *vsk)
521 struct virtio_vsock_pkt_info info = {
522 .op = VIRTIO_VSOCK_OP_REQUEST,
523 .type = VIRTIO_VSOCK_TYPE_STREAM,
527 return virtio_transport_send_pkt_info(vsk, &info);
529 EXPORT_SYMBOL_GPL(virtio_transport_connect);
531 int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
533 struct virtio_vsock_pkt_info info = {
534 .op = VIRTIO_VSOCK_OP_SHUTDOWN,
535 .type = VIRTIO_VSOCK_TYPE_STREAM,
536 .flags = (mode & RCV_SHUTDOWN ?
537 VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
538 (mode & SEND_SHUTDOWN ?
539 VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
543 return virtio_transport_send_pkt_info(vsk, &info);
545 EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
548 virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
549 struct sockaddr_vm *remote_addr,
555 EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);
558 virtio_transport_stream_enqueue(struct vsock_sock *vsk,
562 struct virtio_vsock_pkt_info info = {
563 .op = VIRTIO_VSOCK_OP_RW,
564 .type = VIRTIO_VSOCK_TYPE_STREAM,
570 return virtio_transport_send_pkt_info(vsk, &info);
572 EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
574 void virtio_transport_destruct(struct vsock_sock *vsk)
576 struct virtio_vsock_sock *vvs = vsk->trans;
580 EXPORT_SYMBOL_GPL(virtio_transport_destruct);
582 static int virtio_transport_reset(struct vsock_sock *vsk,
583 struct virtio_vsock_pkt *pkt)
585 struct virtio_vsock_pkt_info info = {
586 .op = VIRTIO_VSOCK_OP_RST,
587 .type = VIRTIO_VSOCK_TYPE_STREAM,
592 /* Send RST only if the original pkt is not a RST pkt */
593 if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
596 return virtio_transport_send_pkt_info(vsk, &info);
599 /* Normally packets are associated with a socket. There may be no socket if an
600 * attempt was made to connect to a socket that does not exist.
602 static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
604 struct virtio_vsock_pkt_info info = {
605 .op = VIRTIO_VSOCK_OP_RST,
606 .type = le16_to_cpu(pkt->hdr.type),
610 /* Send RST only if the original pkt is not a RST pkt */
611 if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
614 pkt = virtio_transport_alloc_pkt(&info, 0,
615 le64_to_cpu(pkt->hdr.dst_cid),
616 le32_to_cpu(pkt->hdr.dst_port),
617 le64_to_cpu(pkt->hdr.src_cid),
618 le32_to_cpu(pkt->hdr.src_port));
622 return virtio_transport_get_ops()->send_pkt(pkt);
625 static void virtio_transport_wait_close(struct sock *sk, long timeout)
628 DEFINE_WAIT_FUNC(wait, woken_wake_function);
630 add_wait_queue(sk_sleep(sk), &wait);
633 if (sk_wait_event(sk, &timeout,
634 sock_flag(sk, SOCK_DONE), &wait))
636 } while (!signal_pending(current) && timeout);
638 remove_wait_queue(sk_sleep(sk), &wait);
642 static void virtio_transport_do_close(struct vsock_sock *vsk,
645 struct sock *sk = sk_vsock(vsk);
647 sock_set_flag(sk, SOCK_DONE);
648 vsk->peer_shutdown = SHUTDOWN_MASK;
649 if (vsock_stream_has_data(vsk) <= 0)
650 sk->sk_state = SS_DISCONNECTING;
651 sk->sk_state_change(sk);
653 if (vsk->close_work_scheduled &&
654 (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
655 vsk->close_work_scheduled = false;
657 vsock_remove_sock(vsk);
659 /* Release refcnt obtained when we scheduled the timeout */
664 static void virtio_transport_close_timeout(struct work_struct *work)
666 struct vsock_sock *vsk =
667 container_of(work, struct vsock_sock, close_work.work);
668 struct sock *sk = sk_vsock(vsk);
673 if (!sock_flag(sk, SOCK_DONE)) {
674 (void)virtio_transport_reset(vsk, NULL);
676 virtio_transport_do_close(vsk, false);
679 vsk->close_work_scheduled = false;
685 /* User context, vsk->sk is locked */
686 static bool virtio_transport_close(struct vsock_sock *vsk)
688 struct sock *sk = &vsk->sk;
690 if (!(sk->sk_state == SS_CONNECTED ||
691 sk->sk_state == SS_DISCONNECTING))
694 /* Already received SHUTDOWN from peer, reply with RST */
695 if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
696 (void)virtio_transport_reset(vsk, NULL);
700 if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
701 (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);
703 if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
704 virtio_transport_wait_close(sk, sk->sk_lingertime);
706 if (sock_flag(sk, SOCK_DONE)) {
711 INIT_DELAYED_WORK(&vsk->close_work,
712 virtio_transport_close_timeout);
713 vsk->close_work_scheduled = true;
714 schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
718 void virtio_transport_release(struct vsock_sock *vsk)
720 struct sock *sk = &vsk->sk;
721 bool remove_sock = true;
724 if (sk->sk_type == SOCK_STREAM)
725 remove_sock = virtio_transport_close(vsk);
729 vsock_remove_sock(vsk);
731 EXPORT_SYMBOL_GPL(virtio_transport_release);
734 virtio_transport_recv_connecting(struct sock *sk,
735 struct virtio_vsock_pkt *pkt)
737 struct vsock_sock *vsk = vsock_sk(sk);
741 switch (le16_to_cpu(pkt->hdr.op)) {
742 case VIRTIO_VSOCK_OP_RESPONSE:
743 sk->sk_state = SS_CONNECTED;
744 sk->sk_socket->state = SS_CONNECTED;
745 vsock_insert_connected(vsk);
746 sk->sk_state_change(sk);
748 case VIRTIO_VSOCK_OP_INVALID:
750 case VIRTIO_VSOCK_OP_RST:
762 virtio_transport_reset(vsk, pkt);
763 sk->sk_state = SS_UNCONNECTED;
765 sk->sk_error_report(sk);
770 virtio_transport_recv_connected(struct sock *sk,
771 struct virtio_vsock_pkt *pkt)
773 struct vsock_sock *vsk = vsock_sk(sk);
774 struct virtio_vsock_sock *vvs = vsk->trans;
777 switch (le16_to_cpu(pkt->hdr.op)) {
778 case VIRTIO_VSOCK_OP_RW:
779 pkt->len = le32_to_cpu(pkt->hdr.len);
782 spin_lock_bh(&vvs->rx_lock);
783 virtio_transport_inc_rx_pkt(vvs, pkt);
784 list_add_tail(&pkt->list, &vvs->rx_queue);
785 spin_unlock_bh(&vvs->rx_lock);
787 sk->sk_data_ready(sk);
789 case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
790 sk->sk_write_space(sk);
792 case VIRTIO_VSOCK_OP_SHUTDOWN:
793 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
794 vsk->peer_shutdown |= RCV_SHUTDOWN;
795 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
796 vsk->peer_shutdown |= SEND_SHUTDOWN;
797 if (vsk->peer_shutdown == SHUTDOWN_MASK &&
798 vsock_stream_has_data(vsk) <= 0)
799 sk->sk_state = SS_DISCONNECTING;
800 if (le32_to_cpu(pkt->hdr.flags))
801 sk->sk_state_change(sk);
803 case VIRTIO_VSOCK_OP_RST:
804 virtio_transport_do_close(vsk, true);
811 virtio_transport_free_pkt(pkt);
816 virtio_transport_recv_disconnecting(struct sock *sk,
817 struct virtio_vsock_pkt *pkt)
819 struct vsock_sock *vsk = vsock_sk(sk);
821 if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
822 virtio_transport_do_close(vsk, true);
826 virtio_transport_send_response(struct vsock_sock *vsk,
827 struct virtio_vsock_pkt *pkt)
829 struct virtio_vsock_pkt_info info = {
830 .op = VIRTIO_VSOCK_OP_RESPONSE,
831 .type = VIRTIO_VSOCK_TYPE_STREAM,
832 .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
833 .remote_port = le32_to_cpu(pkt->hdr.src_port),
838 return virtio_transport_send_pkt_info(vsk, &info);
841 /* Handle server socket */
843 virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
845 struct vsock_sock *vsk = vsock_sk(sk);
846 struct vsock_sock *vchild;
849 if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
850 virtio_transport_reset(vsk, pkt);
854 if (sk_acceptq_is_full(sk)) {
855 virtio_transport_reset(vsk, pkt);
859 child = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
862 virtio_transport_reset(vsk, pkt);
866 sk->sk_ack_backlog++;
868 lock_sock_nested(child, SINGLE_DEPTH_NESTING);
870 child->sk_state = SS_CONNECTED;
872 vchild = vsock_sk(child);
873 vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
874 le32_to_cpu(pkt->hdr.dst_port));
875 vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
876 le32_to_cpu(pkt->hdr.src_port));
878 vsock_insert_connected(vchild);
879 vsock_enqueue_accept(sk, child);
880 virtio_transport_send_response(vchild, pkt);
884 sk->sk_data_ready(sk);
888 static bool virtio_transport_space_update(struct sock *sk,
889 struct virtio_vsock_pkt *pkt)
891 struct vsock_sock *vsk = vsock_sk(sk);
892 struct virtio_vsock_sock *vvs = vsk->trans;
893 bool space_available;
895 /* buf_alloc and fwd_cnt is always included in the hdr */
896 spin_lock_bh(&vvs->tx_lock);
897 vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
898 vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
899 space_available = virtio_transport_has_space(vsk);
900 spin_unlock_bh(&vvs->tx_lock);
901 return space_available;
904 /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
907 void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
909 struct sockaddr_vm src, dst;
910 struct vsock_sock *vsk;
912 bool space_available;
914 vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
915 le32_to_cpu(pkt->hdr.src_port));
916 vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
917 le32_to_cpu(pkt->hdr.dst_port));
919 trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
920 dst.svm_cid, dst.svm_port,
921 le32_to_cpu(pkt->hdr.len),
922 le16_to_cpu(pkt->hdr.type),
923 le16_to_cpu(pkt->hdr.op),
924 le32_to_cpu(pkt->hdr.flags),
925 le32_to_cpu(pkt->hdr.buf_alloc),
926 le32_to_cpu(pkt->hdr.fwd_cnt));
928 if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
929 (void)virtio_transport_reset_no_sock(pkt);
933 /* The socket must be in connected or bound table
934 * otherwise send reset back
936 sk = vsock_find_connected_socket(&src, &dst);
938 sk = vsock_find_bound_socket(&dst);
940 (void)virtio_transport_reset_no_sock(pkt);
947 space_available = virtio_transport_space_update(sk, pkt);
951 /* Update CID in case it has changed after a transport reset event */
952 vsk->local_addr.svm_cid = dst.svm_cid;
955 sk->sk_write_space(sk);
957 switch (sk->sk_state) {
958 case VSOCK_SS_LISTEN:
959 virtio_transport_recv_listen(sk, pkt);
960 virtio_transport_free_pkt(pkt);
963 virtio_transport_recv_connecting(sk, pkt);
964 virtio_transport_free_pkt(pkt);
967 virtio_transport_recv_connected(sk, pkt);
969 case SS_DISCONNECTING:
970 virtio_transport_recv_disconnecting(sk, pkt);
971 virtio_transport_free_pkt(pkt);
974 virtio_transport_free_pkt(pkt);
979 /* Release refcnt obtained when we fetched this socket out of the
980 * bound or connected list.
986 virtio_transport_free_pkt(pkt);
988 EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
990 void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
995 EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
997 MODULE_LICENSE("GPL v2");
998 MODULE_AUTHOR("Asias He");
999 MODULE_DESCRIPTION("common code for virtio vsock");