2 * common code for virtio vsock
4 * Copyright (C) 2013-2015 Red Hat, Inc.
5 * Author: Asias He <asias@redhat.com>
6 * Stefan Hajnoczi <stefanha@redhat.com>
8 * This work is licensed under the terms of the GNU GPL, version 2.
10 #include <linux/spinlock.h>
11 #include <linux/module.h>
12 #include <linux/ctype.h>
13 #include <linux/list.h>
14 #include <linux/virtio.h>
15 #include <linux/virtio_ids.h>
16 #include <linux/virtio_config.h>
17 #include <linux/virtio_vsock.h>
20 #include <net/af_vsock.h>
22 #define CREATE_TRACE_POINTS
23 #include <trace/events/vsock_virtio_transport_common.h>
25 /* How long to wait for graceful shutdown of a connection */
26 #define VSOCK_CLOSE_TIMEOUT (8 * HZ)
28 static const struct virtio_transport *virtio_transport_get_ops(void)
30 const struct vsock_transport *t = vsock_core_get_transport();
32 return container_of(t, struct virtio_transport, transport);
/* Allocate a packet and fill its wire header (little-endian fields) from
 * @info and the explicit src/dst CID:port pair; when info->msg is set and
 * len > 0, a payload buffer is allocated and len bytes are copied in.
 * NOTE(review): allocation-failure and memcpy-failure paths are not visible
 * in this view — presumably pkt/buf are freed and NULL returned; confirm.
 */
35 static struct virtio_vsock_pkt *
36 virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
43 struct virtio_vsock_pkt *pkt;
46 pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	/* Header fields are stored in little-endian per the virtio spec. */
50 pkt->hdr.type = cpu_to_le16(info->type);
51 pkt->hdr.op = cpu_to_le16(info->op);
52 pkt->hdr.src_cid = cpu_to_le64(src_cid);
53 pkt->hdr.dst_cid = cpu_to_le64(dst_cid);
54 pkt->hdr.src_port = cpu_to_le32(src_port);
55 pkt->hdr.dst_port = cpu_to_le32(dst_port);
56 pkt->hdr.flags = cpu_to_le32(info->flags);
58 pkt->hdr.len = cpu_to_le32(len);
59 pkt->reply = info->reply;
61 if (info->msg && len > 0) {
62 pkt->buf = kmalloc(len, GFP_KERNEL);
65 err = memcpy_from_msg(pkt->buf, info->msg, len);
	/* Tracepoint records the header fields for debugging. */
70 trace_virtio_transport_alloc_pkt(src_cid, src_port,
/* Build and transmit one packet for @vsk as described by @info.
 * Destination defaults to the socket's remote address unless @info carries
 * an explicit remote_cid/remote_port (used for replies before a socket is
 * fully connected).  pkt_len is clamped to the rx buffer size and then to
 * the available tx credit, so fewer bytes than requested may be sent.
 */
86 static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
87 struct virtio_vsock_pkt_info *info)
89 u32 src_cid, src_port, dst_cid, dst_port;
90 struct virtio_vsock_sock *vvs;
91 struct virtio_vsock_pkt *pkt;
92 u32 pkt_len = info->pkt_len;
94 src_cid = vm_sockets_get_local_cid();
95 src_port = vsk->local_addr.svm_port;
96 if (!info->remote_cid) {
97 dst_cid = vsk->remote_addr.svm_cid;
98 dst_port = vsk->remote_addr.svm_port;
100 dst_cid = info->remote_cid;
101 dst_port = info->remote_port;
106 /* we can send less than pkt_len bytes */
107 if (pkt_len > VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE)
108 pkt_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
110 /* virtio_transport_get_credit might return less than pkt_len credit */
111 pkt_len = virtio_transport_get_credit(vvs, pkt_len);
113 /* Do not send zero length OP_RW pkt */
114 if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
	/* NOTE(review): allocation failure path elided — presumably the
	 * credit taken above is returned via put_credit; confirm.
	 */
117 pkt = virtio_transport_alloc_pkt(info, pkt_len,
121 virtio_transport_put_credit(vvs, pkt_len);
125 virtio_transport_inc_tx_pkt(vvs, pkt);
127 return virtio_transport_get_ops()->send_pkt(pkt);
130 static void virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
131 struct virtio_vsock_pkt *pkt)
133 vvs->rx_bytes += pkt->len;
136 static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
137 struct virtio_vsock_pkt *pkt)
139 vvs->rx_bytes -= pkt->len;
140 vvs->fwd_cnt += pkt->len;
/* Stamp the current credit state (fwd_cnt/buf_alloc) into an outgoing
 * packet's header; tx_lock serializes against credit updates.
 */
143 void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
145 spin_lock_bh(&vvs->tx_lock);
146 pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
147 pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
148 spin_unlock_bh(&vvs->tx_lock);
150 EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
/* Reserve up to @credit bytes of tx credit.  Available credit is the
 * peer's advertised buffer minus bytes in flight (tx_cnt - peer_fwd_cnt).
 * NOTE(review): the clamp to @credit, the tx_cnt increment and the return
 * are elided in this view — confirm the reserved amount is returned.
 */
152 u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
156 spin_lock_bh(&vvs->tx_lock);
157 ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
161 spin_unlock_bh(&vvs->tx_lock);
165 EXPORT_SYMBOL_GPL(virtio_transport_get_credit);
/* Return previously reserved tx credit (e.g. after a failed allocation). */
167 void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
169 spin_lock_bh(&vvs->tx_lock);
170 vvs->tx_cnt -= credit;
171 spin_unlock_bh(&vvs->tx_lock);
173 EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
/* Send a CREDIT_UPDATE control packet so the peer learns how much receive
 * space we have freed.
 */
175 static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
177 struct virtio_vsock_hdr *hdr)
179 struct virtio_vsock_pkt_info info = {
180 .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
184 return virtio_transport_send_pkt_info(vsk, &info);
/* Copy up to @len bytes from the socket's rx_queue into @msg.
 * Packets are consumed front-to-back; a partially read packet keeps its
 * offset (pkt->off) for the next call.  Finishes by sending a credit
 * update so the peer learns about the freed buffer space.
 */
188 virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
192 struct virtio_vsock_sock *vvs = vsk->trans;
193 struct virtio_vsock_pkt *pkt;
194 size_t bytes, total = 0;
197 spin_lock_bh(&vvs->rx_lock);
198 while (total < len && !list_empty(&vvs->rx_queue)) {
199 pkt = list_first_entry(&vvs->rx_queue,
200 struct virtio_vsock_pkt, list);
	/* Never copy past the unread remainder of this packet. */
203 if (bytes > pkt->len - pkt->off)
204 bytes = pkt->len - pkt->off;
206 /* sk_lock is held by caller so no one else can dequeue.
207 * Unlock rx_lock since memcpy_to_msg() may sleep.
209 spin_unlock_bh(&vvs->rx_lock);
211 err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
215 spin_lock_bh(&vvs->rx_lock);
	/* Fully drained packet: update counters and free it. */
219 if (pkt->off == pkt->len) {
220 virtio_transport_dec_rx_pkt(vvs, pkt);
221 list_del(&pkt->list);
222 virtio_transport_free_pkt(pkt);
225 spin_unlock_bh(&vvs->rx_lock);
227 /* Send a credit pkt to peer */
228 virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
/* Stream receive entry point.  MSG_PEEK is not supported by this
 * transport (the early-return path is elided in this view — presumably
 * it returns an error; confirm).
 */
240 virtio_transport_stream_dequeue(struct vsock_sock *vsk,
242 size_t len, int flags)
244 if (flags & MSG_PEEK)
247 return virtio_transport_stream_do_dequeue(vsk, msg, len);
249 EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
/* Datagram receive is unsupported by the virtio transport; this is a
 * stub (its return, elided here, is presumably an error code — confirm).
 */
252 virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
254 size_t len, int flags)
258 EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);
/* Report how many unread payload bytes are queued on the socket;
 * rx_lock guards the rx_bytes counter.
 */
260 s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
262 struct virtio_vsock_sock *vvs = vsk->trans;
265 spin_lock_bh(&vvs->rx_lock);
266 bytes = vvs->rx_bytes;
267 spin_unlock_bh(&vvs->rx_lock);
271 EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);
/* Compute free tx credit: peer's buffer minus bytes in flight.
 * Caller must hold vvs->tx_lock (see the callers below).
 * NOTE(review): a clamp for a negative result appears to be elided — confirm.
 */
273 static s64 virtio_transport_has_space(struct vsock_sock *vsk)
275 struct virtio_vsock_sock *vvs = vsk->trans;
278 bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
/* Locked wrapper around virtio_transport_has_space() for core vsock. */
285 s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
287 struct virtio_vsock_sock *vvs = vsk->trans;
290 spin_lock_bh(&vvs->tx_lock);
291 bytes = virtio_transport_has_space(vsk);
292 spin_unlock_bh(&vvs->tx_lock);
296 EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);
/* Allocate and initialize the per-socket transport state (vsk->trans).
 * A child socket (@psk set) inherits its parent's buffer sizing and the
 * peer credit snapshot; otherwise compile-time defaults are used.
 */
298 int virtio_transport_do_socket_init(struct vsock_sock *vsk,
299 struct vsock_sock *psk)
301 struct virtio_vsock_sock *vvs;
303 vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
	/* Accepted child: copy settings from the listening parent. */
310 struct virtio_vsock_sock *ptrans = psk->trans;
312 vvs->buf_size = ptrans->buf_size;
313 vvs->buf_size_min = ptrans->buf_size_min;
314 vvs->buf_size_max = ptrans->buf_size_max;
315 vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
317 vvs->buf_size = VIRTIO_VSOCK_DEFAULT_BUF_SIZE;
318 vvs->buf_size_min = VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE;
319 vvs->buf_size_max = VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE;
	/* Advertise the full buffer as receive credit initially. */
322 vvs->buf_alloc = vvs->buf_size;
324 spin_lock_init(&vvs->rx_lock);
325 spin_lock_init(&vvs->tx_lock);
326 INIT_LIST_HEAD(&vvs->rx_queue);
330 EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
332 u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk)
334 struct virtio_vsock_sock *vvs = vsk->trans;
336 return vvs->buf_size;
338 EXPORT_SYMBOL_GPL(virtio_transport_get_buffer_size);
340 u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk)
342 struct virtio_vsock_sock *vvs = vsk->trans;
344 return vvs->buf_size_min;
346 EXPORT_SYMBOL_GPL(virtio_transport_get_min_buffer_size);
348 u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk)
350 struct virtio_vsock_sock *vvs = vsk->trans;
352 return vvs->buf_size_max;
354 EXPORT_SYMBOL_GPL(virtio_transport_get_max_buffer_size);
/* Set the buffer size, clamped to the transport maximum, and widen the
 * min/max bounds to contain it.  Also refreshes buf_alloc so the new size
 * is advertised as receive credit.
 * NOTE(review): the assignment of vvs->buf_size itself is elided in this
 * view — confirm it is set alongside buf_alloc.
 */
356 void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
358 struct virtio_vsock_sock *vvs = vsk->trans;
360 if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
361 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
362 if (val < vvs->buf_size_min)
363 vvs->buf_size_min = val;
364 if (val > vvs->buf_size_max)
365 vvs->buf_size_max = val;
367 vvs->buf_alloc = val;
369 EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
/* Set the minimum buffer size, clamped to the transport maximum.
 * NOTE(review): the branch body for val > buf_size is elided — presumably
 * val is lowered to buf_size before being stored; confirm.
 */
371 void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
373 struct virtio_vsock_sock *vvs = vsk->trans;
375 if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
376 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
377 if (val > vvs->buf_size)
379 vvs->buf_size_min = val;
381 EXPORT_SYMBOL_GPL(virtio_transport_set_min_buffer_size);
/* Set the maximum buffer size, clamped to the transport maximum.
 * NOTE(review): the branch body for val < buf_size is elided — presumably
 * the current buf_size is shrunk to val; confirm.
 */
383 void virtio_transport_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
385 struct virtio_vsock_sock *vvs = vsk->trans;
387 if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
388 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
389 if (val < vvs->buf_size)
391 vvs->buf_size_max = val;
393 EXPORT_SYMBOL_GPL(virtio_transport_set_max_buffer_size);
/* poll() callback: report whether queued stream data is ready to read. */
396 virtio_transport_notify_poll_in(struct vsock_sock *vsk,
398 bool *data_ready_now)
400 if (vsock_stream_has_data(vsk))
401 *data_ready_now = true;
403 *data_ready_now = false;
407 EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);
/* poll() callback: report whether tx credit allows writing right now. */
410 virtio_transport_notify_poll_out(struct vsock_sock *vsk,
412 bool *space_avail_now)
416 free_space = vsock_stream_has_space(vsk);
418 *space_avail_now = true;
419 else if (free_space == 0)
420 *space_avail_now = false;
424 EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);
/* No-op recv notification hook required by the core vsock API. */
426 int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
427 size_t target, struct vsock_transport_recv_notify_data *data)
431 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);
/* No-op recv notification hook required by the core vsock API. */
433 int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
434 size_t target, struct vsock_transport_recv_notify_data *data)
438 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);
/* No-op recv notification hook required by the core vsock API. */
440 int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
441 size_t target, struct vsock_transport_recv_notify_data *data)
445 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);
/* No-op recv notification hook required by the core vsock API. */
447 int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
448 size_t target, ssize_t copied, bool data_read,
449 struct vsock_transport_recv_notify_data *data)
453 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);
/* No-op send notification hook required by the core vsock API. */
455 int virtio_transport_notify_send_init(struct vsock_sock *vsk,
456 struct vsock_transport_send_notify_data *data)
460 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);
/* No-op send notification hook required by the core vsock API. */
462 int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
463 struct vsock_transport_send_notify_data *data)
467 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);
/* No-op send notification hook required by the core vsock API. */
469 int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
470 struct vsock_transport_send_notify_data *data)
474 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);
/* No-op send notification hook required by the core vsock API. */
476 int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
477 ssize_t written, struct vsock_transport_send_notify_data *data)
481 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
483 u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
485 struct virtio_vsock_sock *vvs = vsk->trans;
487 return vvs->buf_size;
489 EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
/* Stream sockets on this transport are always considered active
 * (body elided in this view — presumably returns true; confirm).
 */
491 bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
495 EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);
/* Stream connections are permitted to any CID/port
 * (body elided in this view — presumably returns true; confirm).
 */
497 bool virtio_transport_stream_allow(u32 cid, u32 port)
501 EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);
/* Datagram bind is unsupported by the virtio transport; stub
 * (return value elided — presumably an error code; confirm).
 */
503 int virtio_transport_dgram_bind(struct vsock_sock *vsk,
504 struct sockaddr_vm *addr)
508 EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);
/* Datagram sockets are not supported
 * (body elided — presumably returns false; confirm).
 */
510 bool virtio_transport_dgram_allow(u32 cid, u32 port)
514 EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);
/* Initiate a connection by sending an OP_REQUEST to the remote address
 * already stored on the socket.
 */
516 int virtio_transport_connect(struct vsock_sock *vsk)
518 struct virtio_vsock_pkt_info info = {
519 .op = VIRTIO_VSOCK_OP_REQUEST,
520 .type = VIRTIO_VSOCK_TYPE_STREAM,
523 return virtio_transport_send_pkt_info(vsk, &info);
525 EXPORT_SYMBOL_GPL(virtio_transport_connect);
527 int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
529 struct virtio_vsock_pkt_info info = {
530 .op = VIRTIO_VSOCK_OP_SHUTDOWN,
531 .type = VIRTIO_VSOCK_TYPE_STREAM,
532 .flags = (mode & RCV_SHUTDOWN ?
533 VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
534 (mode & SEND_SHUTDOWN ?
535 VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
538 return virtio_transport_send_pkt_info(vsk, &info);
540 EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
/* Datagram send is unsupported by the virtio transport; stub
 * (return value elided — presumably an error code; confirm).
 */
543 virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
544 struct sockaddr_vm *remote_addr,
550 EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);
/* Stream send entry point: wrap the user message in an OP_RW packet
 * (msg/length fields of info are elided in this view).
 */
553 virtio_transport_stream_enqueue(struct vsock_sock *vsk,
557 struct virtio_vsock_pkt_info info = {
558 .op = VIRTIO_VSOCK_OP_RW,
559 .type = VIRTIO_VSOCK_TYPE_STREAM,
564 return virtio_transport_send_pkt_info(vsk, &info);
566 EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
/* Release the per-socket transport state allocated by
 * virtio_transport_do_socket_init() (free call elided in this view).
 */
568 void virtio_transport_destruct(struct vsock_sock *vsk)
570 struct virtio_vsock_sock *vvs = vsk->trans;
574 EXPORT_SYMBOL_GPL(virtio_transport_destruct);
/* Reset the connection by sending OP_RST, unless we are reacting to a
 * packet that is itself an RST (avoids RST ping-pong).
 */
576 static int virtio_transport_reset(struct vsock_sock *vsk,
577 struct virtio_vsock_pkt *pkt)
579 struct virtio_vsock_pkt_info info = {
580 .op = VIRTIO_VSOCK_OP_RST,
581 .type = VIRTIO_VSOCK_TYPE_STREAM,
585 /* Send RST only if the original pkt is not a RST pkt */
586 if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
589 return virtio_transport_send_pkt_info(vsk, &info);
592 /* Normally packets are associated with a socket. There may be no socket if an
593 * attempt was made to connect to a socket that does not exist.
595 static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
597 struct virtio_vsock_pkt_info info = {
598 .op = VIRTIO_VSOCK_OP_RST,
599 .type = le16_to_cpu(pkt->hdr.type),
603 /* Send RST only if the original pkt is not a RST pkt */
604 if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
	/* Reply goes back to the sender: swap src and dst from the
	 * offending packet's header.
	 */
607 pkt = virtio_transport_alloc_pkt(&info, 0,
608 le64_to_cpu(pkt->hdr.dst_cid),
609 le32_to_cpu(pkt->hdr.dst_port),
610 le64_to_cpu(pkt->hdr.src_cid),
611 le32_to_cpu(pkt->hdr.src_port));
615 return virtio_transport_get_ops()->send_pkt(pkt);
/* Linger: block until the socket is marked SOCK_DONE, a signal arrives,
 * or @timeout expires.
 */
618 static void virtio_transport_wait_close(struct sock *sk, long timeout)
621 DEFINE_WAIT_FUNC(wait, woken_wake_function);
623 add_wait_queue(sk_sleep(sk), &wait);
626 if (sk_wait_event(sk, &timeout,
627 sock_flag(sk, SOCK_DONE), &wait))
629 } while (!signal_pending(current) && timeout);
631 remove_wait_queue(sk_sleep(sk), &wait);
/* Finalize a close: mark the socket done, record full peer shutdown and,
 * if no unread data remains, move to SS_DISCONNECTING.  If a close
 * timeout was scheduled, cancel it (when @cancel_timeout) and drop the
 * reference taken for the delayed work.
 */
635 static void virtio_transport_do_close(struct vsock_sock *vsk,
638 struct sock *sk = sk_vsock(vsk);
640 sock_set_flag(sk, SOCK_DONE);
641 vsk->peer_shutdown = SHUTDOWN_MASK;
642 if (vsock_stream_has_data(vsk) <= 0)
643 sk->sk_state = SS_DISCONNECTING;
644 sk->sk_state_change(sk);
646 if (vsk->close_work_scheduled &&
647 (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
648 vsk->close_work_scheduled = false;
650 vsock_remove_sock(vsk);
652 /* Release refcnt obtained when we scheduled the timeout */
/* Delayed-work handler: the peer never acknowledged our shutdown within
 * VSOCK_CLOSE_TIMEOUT, so force an RST and close locally.
 */
657 static void virtio_transport_close_timeout(struct work_struct *work)
659 struct vsock_sock *vsk =
660 container_of(work, struct vsock_sock, close_work.work);
661 struct sock *sk = sk_vsock(vsk);
666 if (!sock_flag(sk, SOCK_DONE)) {
667 (void)virtio_transport_reset(vsk, NULL);
669 virtio_transport_do_close(vsk, false);
672 vsk->close_work_scheduled = false;
678 /* User context, vsk->sk is locked */
/* Begin a graceful close.  Returns whether the caller may remove the
 * socket immediately (return statements elided in this view).  If the
 * peer has not yet completed shutdown, a delayed-work timeout is armed
 * to force an RST later.
 */
679 static bool virtio_transport_close(struct vsock_sock *vsk)
681 struct sock *sk = &vsk->sk;
683 if (!(sk->sk_state == SS_CONNECTED ||
684 sk->sk_state == SS_DISCONNECTING))
687 /* Already received SHUTDOWN from peer, reply with RST */
688 if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
689 (void)virtio_transport_reset(vsk, NULL);
693 if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
694 (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);
696 if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
697 virtio_transport_wait_close(sk, sk->sk_lingertime);
699 if (sock_flag(sk, SOCK_DONE)) {
704 INIT_DELAYED_WORK(&vsk->close_work,
705 virtio_transport_close_timeout);
706 vsk->close_work_scheduled = true;
707 schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
/* Core vsock release callback: stream sockets go through the graceful
 * close path, which decides whether the socket can be removed now.
 */
711 void virtio_transport_release(struct vsock_sock *vsk)
713 struct sock *sk = &vsk->sk;
714 bool remove_sock = true;
717 if (sk->sk_type == SOCK_STREAM)
718 remove_sock = virtio_transport_close(vsk);
722 vsock_remove_sock(vsk);
724 EXPORT_SYMBOL_GPL(virtio_transport_release);
/* Handle a packet for a socket in the connecting state: RESPONSE
 * completes the handshake; anything unexpected resets the connection and
 * reports the error to the socket.
 */
727 virtio_transport_recv_connecting(struct sock *sk,
728 struct virtio_vsock_pkt *pkt)
730 struct vsock_sock *vsk = vsock_sk(sk);
734 switch (le16_to_cpu(pkt->hdr.op)) {
735 case VIRTIO_VSOCK_OP_RESPONSE:
736 sk->sk_state = SS_CONNECTED;
737 sk->sk_socket->state = SS_CONNECTED;
738 vsock_insert_connected(vsk);
739 sk->sk_state_change(sk);
741 case VIRTIO_VSOCK_OP_INVALID:
743 case VIRTIO_VSOCK_OP_RST:
	/* Failure path: tear down and surface the error. */
755 virtio_transport_reset(vsk, pkt);
756 sk->sk_state = SS_UNCONNECTED;
758 sk->sk_error_report(sk);
/* Handle a packet for an established connection:
 * - OP_RW: queue payload on rx_queue and wake readers
 * - OP_CREDIT_UPDATE: wake writers (credit already absorbed by caller)
 * - OP_SHUTDOWN: record peer shutdown direction(s), maybe disconnect
 * - OP_RST: close immediately
 */
763 virtio_transport_recv_connected(struct sock *sk,
764 struct virtio_vsock_pkt *pkt)
766 struct vsock_sock *vsk = vsock_sk(sk);
767 struct virtio_vsock_sock *vvs = vsk->trans;
770 switch (le16_to_cpu(pkt->hdr.op)) {
771 case VIRTIO_VSOCK_OP_RW:
772 pkt->len = le32_to_cpu(pkt->hdr.len);
775 spin_lock_bh(&vvs->rx_lock);
776 virtio_transport_inc_rx_pkt(vvs, pkt);
777 list_add_tail(&pkt->list, &vvs->rx_queue);
778 spin_unlock_bh(&vvs->rx_lock);
780 sk->sk_data_ready(sk);
782 case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
783 sk->sk_write_space(sk);
785 case VIRTIO_VSOCK_OP_SHUTDOWN:
786 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
787 vsk->peer_shutdown |= RCV_SHUTDOWN;
788 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
789 vsk->peer_shutdown |= SEND_SHUTDOWN;
790 if (vsk->peer_shutdown == SHUTDOWN_MASK &&
791 vsock_stream_has_data(vsk) <= 0)
792 sk->sk_state = SS_DISCONNECTING;
793 if (le32_to_cpu(pkt->hdr.flags))
794 sk->sk_state_change(sk);
796 case VIRTIO_VSOCK_OP_RST:
797 virtio_transport_do_close(vsk, true);
	/* Non-queued packets are freed here; OP_RW keeps ownership above. */
804 virtio_transport_free_pkt(pkt);
/* While disconnecting, only an RST from the peer is acted on: it
 * completes the close.
 */
809 virtio_transport_recv_disconnecting(struct sock *sk,
810 struct virtio_vsock_pkt *pkt)
812 struct vsock_sock *vsk = vsock_sk(sk);
814 if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
815 virtio_transport_do_close(vsk, true);
/* Reply to a connection REQUEST with an OP_RESPONSE addressed to the
 * requester (taken from the incoming packet's source).
 */
819 virtio_transport_send_response(struct vsock_sock *vsk,
820 struct virtio_vsock_pkt *pkt)
822 struct virtio_vsock_pkt_info info = {
823 .op = VIRTIO_VSOCK_OP_RESPONSE,
824 .type = VIRTIO_VSOCK_TYPE_STREAM,
825 .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
826 .remote_port = le32_to_cpu(pkt->hdr.src_port),
830 return virtio_transport_send_pkt_info(vsk, &info);
833 /* Handle server socket */
/* Accept path: validate the incoming REQUEST, create a child socket,
 * copy the address pair from the packet header, queue the child on the
 * accept queue and send the RESPONSE.  Invalid or over-capacity requests
 * are answered with a reset.
 */
835 virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
837 struct vsock_sock *vsk = vsock_sk(sk);
838 struct vsock_sock *vchild;
841 if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
842 virtio_transport_reset(vsk, pkt);
846 if (sk_acceptq_is_full(sk)) {
847 virtio_transport_reset(vsk, pkt);
851 child = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
854 virtio_transport_reset(vsk, pkt);
858 sk->sk_ack_backlog++;
860 lock_sock_nested(child, SINGLE_DEPTH_NESTING);
862 child->sk_state = SS_CONNECTED;
864 vchild = vsock_sk(child);
	/* Our local address is the packet's destination; the peer's is its source. */
865 vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
866 le32_to_cpu(pkt->hdr.dst_port));
867 vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
868 le32_to_cpu(pkt->hdr.src_port));
870 vsock_insert_connected(vchild);
871 vsock_enqueue_accept(sk, child);
872 virtio_transport_send_response(vchild, pkt);
876 sk->sk_data_ready(sk);
880 static bool virtio_transport_space_update(struct sock *sk,
881 struct virtio_vsock_pkt *pkt)
883 struct vsock_sock *vsk = vsock_sk(sk);
884 struct virtio_vsock_sock *vvs = vsk->trans;
885 bool space_available;
887 /* buf_alloc and fwd_cnt is always included in the hdr */
888 spin_lock_bh(&vvs->tx_lock);
889 vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
890 vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
891 space_available = virtio_transport_has_space(vsk);
892 spin_unlock_bh(&vvs->tx_lock);
893 return space_available;
896 /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
/* Central receive dispatch: decode addresses, trace the packet, reject
 * non-stream types, look up the owning socket (connected table first,
 * then bound), absorb the header's credit, and route the packet to the
 * handler for the socket's current state.  Packets with no owner are
 * answered with a reset; unconsumed packets are freed.
 */
899 void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
901 struct sockaddr_vm src, dst;
902 struct vsock_sock *vsk;
904 bool space_available;
906 vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
907 le32_to_cpu(pkt->hdr.src_port));
908 vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
909 le32_to_cpu(pkt->hdr.dst_port));
911 trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
912 dst.svm_cid, dst.svm_port,
913 le32_to_cpu(pkt->hdr.len),
914 le16_to_cpu(pkt->hdr.type),
915 le16_to_cpu(pkt->hdr.op),
916 le32_to_cpu(pkt->hdr.flags),
917 le32_to_cpu(pkt->hdr.buf_alloc),
918 le32_to_cpu(pkt->hdr.fwd_cnt));
920 if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
921 (void)virtio_transport_reset_no_sock(pkt);
925 /* The socket must be in connected or bound table
926 * otherwise send reset back
928 sk = vsock_find_connected_socket(&src, &dst);
930 sk = vsock_find_bound_socket(&dst);
932 (void)virtio_transport_reset_no_sock(pkt);
939 space_available = virtio_transport_space_update(sk, pkt);
943 /* Update CID in case it has changed after a transport reset event */
944 vsk->local_addr.svm_cid = dst.svm_cid;
947 sk->sk_write_space(sk);
949 switch (sk->sk_state) {
950 case VSOCK_SS_LISTEN:
951 virtio_transport_recv_listen(sk, pkt);
952 virtio_transport_free_pkt(pkt);
955 virtio_transport_recv_connecting(sk, pkt);
956 virtio_transport_free_pkt(pkt);
	/* Connected state keeps ownership of OP_RW packets (queued on rx). */
959 virtio_transport_recv_connected(sk, pkt);
961 case SS_DISCONNECTING:
962 virtio_transport_recv_disconnecting(sk, pkt);
963 virtio_transport_free_pkt(pkt);
966 virtio_transport_free_pkt(pkt);
971 /* Release refcnt obtained when we fetched this socket out of the
972 * bound or connected list.
978 virtio_transport_free_pkt(pkt);
980 EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
/* Free a packet and its payload buffer
 * (free calls elided in this view — presumably kfree of buf then pkt).
 */
982 void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
987 EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
989 MODULE_LICENSE("GPL v2");
990 MODULE_AUTHOR("Asias He");
991 MODULE_DESCRIPTION("common code for virtio vsock");