2 #include <linux/errno.h>
3 #include <linux/errqueue.h>
4 #include <linux/file.h>
6 #include <linux/kernel.h>
7 #include <linux/module.h>
9 #include <linux/netdevice.h>
10 #include <linux/poll.h>
11 #include <linux/rculist.h>
12 #include <linux/skbuff.h>
13 #include <linux/socket.h>
14 #include <linux/uaccess.h>
15 #include <linux/workqueue.h>
17 #include <net/netns/generic.h>
20 #include <uapi/linux/kcm.h>
22 unsigned int kcm_net_id;
24 static struct kmem_cache *kcm_psockp __read_mostly;
25 static struct kmem_cache *kcm_muxp __read_mostly;
26 static struct workqueue_struct *kcm_wq;
28 static inline struct kcm_sock *kcm_sk(const struct sock *sk)
30 return (struct kcm_sock *)sk;
33 static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
35 return (struct kcm_tx_msg *)skb->cb;
38 static inline struct kcm_rx_msg *kcm_rx_msg(struct sk_buff *skb)
40 return (struct kcm_rx_msg *)((void *)skb->cb +
41 offsetof(struct qdisc_skb_cb, data));
44 static void report_csk_error(struct sock *csk, int err)
47 csk->sk_error_report(csk);
50 /* Callback lock held */
51 static void kcm_abort_rx_psock(struct kcm_psock *psock, int err,
54 struct sock *csk = psock->sk;
56 /* Unrecoverable error in receive */
58 if (psock->rx_stopped)
61 psock->rx_stopped = 1;
62 KCM_STATS_INCR(psock->stats.rx_aborts);
64 /* Report an error on the lower socket */
65 report_csk_error(csk, err);
68 static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
71 struct sock *csk = psock->sk;
72 struct kcm_mux *mux = psock->mux;
74 /* Unrecoverable error in transmit */
76 spin_lock_bh(&mux->lock);
78 if (psock->tx_stopped) {
79 spin_unlock_bh(&mux->lock);
83 psock->tx_stopped = 1;
84 KCM_STATS_INCR(psock->stats.tx_aborts);
87 /* Take off psocks_avail list */
88 list_del(&psock->psock_avail_list);
89 } else if (wakeup_kcm) {
90 /* In this case psock is being aborted while outside of
91 * write_msgs and psock is reserved. Schedule tx_work
92 * to handle the failure there. Need to commit tx_stopped
93 * before queuing work.
97 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
100 spin_unlock_bh(&mux->lock);
102 /* Report error on lower socket */
103 report_csk_error(csk, err);
106 /* RX mux lock held. */
107 static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
108 struct kcm_psock *psock)
110 KCM_STATS_ADD(mux->stats.rx_bytes,
111 psock->stats.rx_bytes - psock->saved_rx_bytes);
112 mux->stats.rx_msgs +=
113 psock->stats.rx_msgs - psock->saved_rx_msgs;
114 psock->saved_rx_msgs = psock->stats.rx_msgs;
115 psock->saved_rx_bytes = psock->stats.rx_bytes;
118 static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
119 struct kcm_psock *psock)
121 KCM_STATS_ADD(mux->stats.tx_bytes,
122 psock->stats.tx_bytes - psock->saved_tx_bytes);
123 mux->stats.tx_msgs +=
124 psock->stats.tx_msgs - psock->saved_tx_msgs;
125 psock->saved_tx_msgs = psock->stats.tx_msgs;
126 psock->saved_tx_bytes = psock->stats.tx_bytes;
129 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
131 /* KCM is ready to receive messages on its queue-- either the KCM is new or
132 * has become unblocked after being blocked on full socket buffer. Queue any
133 * pending ready messages on a psock. RX mux lock held.
135 static void kcm_rcv_ready(struct kcm_sock *kcm)
137 struct kcm_mux *mux = kcm->mux;
138 struct kcm_psock *psock;
141 if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
144 while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
145 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
146 /* Assuming buffer limit has been reached */
147 skb_queue_head(&mux->rx_hold_queue, skb);
148 WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
153 while (!list_empty(&mux->psocks_ready)) {
154 psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
157 if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
158 /* Assuming buffer limit has been reached */
159 WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
163 /* Consumed the ready message on the psock. Schedule rx_work to
166 list_del(&psock->psock_ready_list);
167 psock->ready_rx_msg = NULL;
169 /* Commit clearing of ready_rx_msg for queuing work */
172 queue_work(kcm_wq, &psock->rx_work);
175 /* Buffer limit is okay now, add to ready list */
176 list_add_tail(&kcm->wait_rx_list,
177 &kcm->mux->kcm_rx_waiters);
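/* skb destructor for messages queued to a KCM socket. Uncharge the
 * receive memory and, if the socket has drained below sk_rcvlowat and
 * is not already waiting or attached to a psock, mark it ready to
 * receive again.
 */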
181 static void kcm_rfree(struct sk_buff *skb)
183 struct sock *sk = skb->sk;
184 struct kcm_sock *kcm = kcm_sk(sk);
185 struct kcm_mux *mux = kcm->mux;
186 unsigned int len = skb->truesize;
188 sk_mem_uncharge(sk, len);
189 atomic_sub(len, &sk->sk_rmem_alloc);
191 /* For reading rx_wait and rx_psock without holding lock */
192 smp_mb__after_atomic();
194 if (!kcm->rx_wait && !kcm->rx_psock &&
195 sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
196 spin_lock_bh(&mux->rx_lock);
198 spin_unlock_bh(&mux->rx_lock);
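/* Charge an skb against the KCM socket's receive memory and queue it,
 * failing if the receive buffer limit would be exceeded.
 */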
202 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
204 struct sk_buff_head *list = &sk->sk_receive_queue;
206 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
209 if (!sk_rmem_schedule(sk, skb, skb->truesize))
216 skb->destructor = kcm_rfree;
217 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
218 sk_mem_charge(sk, skb->truesize);
220 skb_queue_tail(list, skb);
222 if (!sock_flag(sk, SOCK_DEAD))
223 sk->sk_data_ready(sk);
228 /* Requeue received messages for a kcm socket to other kcm sockets. This is
229 * called when receive has been disabled on a kcm socket.
232 static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
235 struct kcm_sock *kcm;
237 while ((skb = __skb_dequeue(head))) {
238 /* Reset destructor to avoid calling kcm_rcv_ready */
239 skb->destructor = sock_rfree;
242 if (list_empty(&mux->kcm_rx_waiters)) {
243 skb_queue_tail(&mux->rx_hold_queue, skb);
247 kcm = list_first_entry(&mux->kcm_rx_waiters,
248 struct kcm_sock, wait_rx_list);
250 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
251 /* Should mean socket buffer full */
252 list_del(&kcm->wait_rx_list);
253 kcm->rx_wait = false;
255 /* Commit rx_wait to read in kcm_free */
263 /* Lower sock lock held */
264 static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
265 struct sk_buff *head)
267 struct kcm_mux *mux = psock->mux;
268 struct kcm_sock *kcm;
270 WARN_ON(psock->ready_rx_msg);
273 return psock->rx_kcm;
275 spin_lock_bh(&mux->rx_lock);
278 spin_unlock_bh(&mux->rx_lock);
279 return psock->rx_kcm;
282 kcm_update_rx_mux_stats(mux, psock);
284 if (list_empty(&mux->kcm_rx_waiters)) {
285 psock->ready_rx_msg = head;
286 list_add_tail(&psock->psock_ready_list,
288 spin_unlock_bh(&mux->rx_lock);
292 kcm = list_first_entry(&mux->kcm_rx_waiters,
293 struct kcm_sock, wait_rx_list);
294 list_del(&kcm->wait_rx_list);
295 kcm->rx_wait = false;
298 kcm->rx_psock = psock;
300 spin_unlock_bh(&mux->rx_lock);
305 static void kcm_done(struct kcm_sock *kcm);
307 static void kcm_done_work(struct work_struct *w)
309 kcm_done(container_of(w, struct kcm_sock, done_work));
312 /* Lower sock held */
313 static void unreserve_rx_kcm(struct kcm_psock *psock,
316 struct kcm_sock *kcm = psock->rx_kcm;
317 struct kcm_mux *mux = psock->mux;
322 spin_lock_bh(&mux->rx_lock);
324 psock->rx_kcm = NULL;
325 kcm->rx_psock = NULL;
327 /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
332 if (unlikely(kcm->done)) {
333 spin_unlock_bh(&mux->rx_lock);
335 /* Need to run kcm_done in a task since we need to acquire
336 * callback locks which may already be held here.
338 INIT_WORK(&kcm->done_work, kcm_done_work);
339 schedule_work(&kcm->done_work);
343 if (unlikely(kcm->rx_disabled)) {
344 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
345 } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
346 /* Check for degenerative race with rx_wait that all
347 * data was dequeued (accounted for in kcm_rfree).
351 spin_unlock_bh(&mux->rx_lock);
354 /* Macro to invoke filter function. */
355 #define KCM_RUN_FILTER(prog, ctx) \
356 (*prog->bpf_func)(ctx, prog->insnsi)
358 /* Lower socket lock held */
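/* tcp_read_sock() callback: clone incoming TCP data into the psock's
 * in-progress message skb, run the attached BPF program to determine
 * the message length, and deliver complete messages to a reserved KCM
 * socket.
 */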
359 static int kcm_tcp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
360 unsigned int orig_offset, size_t orig_len)
362 struct kcm_psock *psock = (struct kcm_psock *)desc->arg.data;
363 struct kcm_rx_msg *rxm;
364 struct kcm_sock *kcm;
365 struct sk_buff *head, *skb;
366 size_t eaten = 0, cand_len;
369 bool cloned_orig = false;
371 if (psock->ready_rx_msg)
374 head = psock->rx_skb_head;
376 /* Message already in progress */
378 if (unlikely(orig_offset)) {
379 /* Getting data with a non-zero offset when a message is
380 * in progress is not expected. If it does happen, we
381 * need to clone and pull since we can't deal with
382 * offsets in the skbs for a message except in the head.
384 orig_skb = skb_clone(orig_skb, GFP_ATOMIC);
386 KCM_STATS_INCR(psock->stats.rx_mem_fail);
387 desc->error = -ENOMEM;
390 if (!pskb_pull(orig_skb, orig_offset)) {
391 KCM_STATS_INCR(psock->stats.rx_mem_fail);
393 desc->error = -ENOMEM;
400 if (!psock->rx_skb_nextp) {
401 /* We are going to append to the frags_list of head.
402 * Need to unshare the frag_list.
404 err = skb_unclone(head, GFP_ATOMIC);
406 KCM_STATS_INCR(psock->stats.rx_mem_fail);
411 if (unlikely(skb_shinfo(head)->frag_list)) {
412 /* We can't append to an sk_buff that already
413 * has a frag_list. We create a new head, point
414 * the frag_list of that to the old head, and
415 * then are able to use the old head->next for
416 * appending to the message.
418 if (WARN_ON(head->next)) {
419 desc->error = -EINVAL;
423 skb = alloc_skb(0, GFP_ATOMIC);
425 KCM_STATS_INCR(psock->stats.rx_mem_fail);
426 desc->error = -ENOMEM;
429 skb->len = head->len;
430 skb->data_len = head->len;
431 skb->truesize = head->truesize;
432 *kcm_rx_msg(skb) = *kcm_rx_msg(head);
433 psock->rx_skb_nextp = &head->next;
434 skb_shinfo(skb)->frag_list = head;
435 psock->rx_skb_head = skb;
438 psock->rx_skb_nextp =
439 &skb_shinfo(head)->frag_list;
444 while (eaten < orig_len) {
445 /* Always clone since we will consume something */
446 skb = skb_clone(orig_skb, GFP_ATOMIC);
448 KCM_STATS_INCR(psock->stats.rx_mem_fail);
449 desc->error = -ENOMEM;
453 cand_len = orig_len - eaten;
455 head = psock->rx_skb_head;
458 psock->rx_skb_head = head;
459 /* Will set rx_skb_nextp on next packet if needed */
460 psock->rx_skb_nextp = NULL;
461 rxm = kcm_rx_msg(head);
462 memset(rxm, 0, sizeof(*rxm));
463 rxm->offset = orig_offset + eaten;
465 /* Unclone since we may be appending to an skb that we
466 * already share a frag_list with.
468 err = skb_unclone(skb, GFP_ATOMIC);
470 KCM_STATS_INCR(psock->stats.rx_mem_fail);
475 rxm = kcm_rx_msg(head);
476 *psock->rx_skb_nextp = skb;
477 psock->rx_skb_nextp = &skb->next;
478 head->data_len += skb->len;
479 head->len += skb->len;
480 head->truesize += skb->truesize;
483 if (!rxm->full_len) {
486 len = KCM_RUN_FILTER(psock->bpf_prog, head);
489 /* Need more header to determine length */
490 rxm->accum_len += cand_len;
492 KCM_STATS_INCR(psock->stats.rx_need_more_hdr);
493 WARN_ON(eaten != orig_len);
495 } else if (len <= (ssize_t)head->len -
496 skb->len - rxm->offset) {
497 /* Length must be into new skb (and also greater than zero). */
500 KCM_STATS_INCR(psock->stats.rx_bad_hdr_len);
501 desc->error = -EPROTO;
502 psock->rx_skb_head = NULL;
503 kcm_abort_rx_psock(psock, EPROTO, head);
510 extra = (ssize_t)(rxm->accum_len + cand_len) - rxm->full_len;
513 /* Message not complete yet. */
514 rxm->accum_len += cand_len;
516 WARN_ON(eaten != orig_len);
520 /* Positive extra indicates more bytes than needed for the message. */
524 WARN_ON(extra > cand_len);
526 eaten += (cand_len - extra);
528 /* Hurray, we have a new message! */
529 psock->rx_skb_head = NULL;
530 KCM_STATS_INCR(psock->stats.rx_msgs);
533 kcm = reserve_rx_kcm(psock, head);
535 /* Unable to reserve a KCM, message is held in psock. */
539 if (kcm_queue_rcv_skb(&kcm->sk, head)) {
540 /* Should mean socket buffer full */
541 unreserve_rx_kcm(psock, false);
549 KCM_STATS_ADD(psock->stats.rx_bytes, eaten);
554 /* Called with lock held on lower socket */
555 static int psock_tcp_read_sock(struct kcm_psock *psock)
557 read_descriptor_t desc;
559 desc.arg.data = psock;
561 desc.count = 1; /* give more than one skb per call */
563 /* sk should be locked here, so okay to do tcp_read_sock */
564 tcp_read_sock(psock->sk, &desc, kcm_tcp_recv);
566 unreserve_rx_kcm(psock, true);
571 /* Lower sock lock held */
572 static void psock_tcp_data_ready(struct sock *sk)
574 struct kcm_psock *psock;
576 read_lock_bh(&sk->sk_callback_lock);
578 psock = (struct kcm_psock *)sk->sk_user_data;
579 if (unlikely(!psock || psock->rx_stopped))
582 if (psock->ready_rx_msg)
585 if (psock_tcp_read_sock(psock) == -ENOMEM)
586 queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
589 read_unlock_bh(&sk->sk_callback_lock);
592 static void do_psock_rx_work(struct kcm_psock *psock)
594 read_descriptor_t rd_desc;
595 struct sock *csk = psock->sk;
597 /* We need the read lock to synchronize with psock_tcp_data_ready. We
598 * need the socket lock for calling tcp_read_sock.
601 read_lock_bh(&csk->sk_callback_lock);
603 if (unlikely(csk->sk_user_data != psock))
606 if (unlikely(psock->rx_stopped))
609 if (psock->ready_rx_msg)
612 rd_desc.arg.data = psock;
614 if (psock_tcp_read_sock(psock) == -ENOMEM)
615 queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
618 read_unlock_bh(&csk->sk_callback_lock);
622 static void psock_rx_work(struct work_struct *w)
624 do_psock_rx_work(container_of(w, struct kcm_psock, rx_work));
627 static void psock_rx_delayed_work(struct work_struct *w)
629 do_psock_rx_work(container_of(w, struct kcm_psock,
630 rx_delayed_work.work));
633 static void psock_tcp_state_change(struct sock *sk)
635 /* TCP only does a POLLIN for a half close. Do a POLLHUP here
636 * since application will normally not poll with POLLIN
637 * on the TCP sockets.
640 report_csk_error(sk, EPIPE);
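/* Write space became available on the lower TCP socket. If a KCM
 * socket has this psock reserved, schedule its tx work so it can
 * resume sending.
 */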
643 static void psock_tcp_write_space(struct sock *sk)
645 struct kcm_psock *psock;
647 struct kcm_sock *kcm;
649 read_lock_bh(&sk->sk_callback_lock);
651 psock = (struct kcm_psock *)sk->sk_user_data;
652 if (unlikely(!psock))
657 spin_lock_bh(&mux->lock);
659 /* If the psock is reserved, a KCM socket is waiting to send on it; wake it. */
662 queue_work(kcm_wq, &kcm->tx_work);
664 spin_unlock_bh(&mux->lock);
666 read_unlock_bh(&sk->sk_callback_lock);
669 static void unreserve_psock(struct kcm_sock *kcm);
671 /* kcm sock is locked. */
672 static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
674 struct kcm_mux *mux = kcm->mux;
675 struct kcm_psock *psock;
677 psock = kcm->tx_psock;
679 smp_rmb(); /* Must read tx_psock before tx_wait */
682 WARN_ON(kcm->tx_wait);
683 if (unlikely(psock->tx_stopped))
684 unreserve_psock(kcm);
686 return kcm->tx_psock;
689 spin_lock_bh(&mux->lock);
691 /* Check again under lock to see if a psock was reserved for this
692 * kcm via unreserve_psock (psock_now_avail).
694 psock = kcm->tx_psock;
695 if (unlikely(psock)) {
696 WARN_ON(kcm->tx_wait);
697 spin_unlock_bh(&mux->lock);
698 return kcm->tx_psock;
701 if (!list_empty(&mux->psocks_avail)) {
702 psock = list_first_entry(&mux->psocks_avail,
705 list_del(&psock->psock_avail_list);
707 list_del(&kcm->wait_psock_list);
708 kcm->tx_wait = false;
710 kcm->tx_psock = psock;
712 KCM_STATS_INCR(psock->stats.reserved);
713 } else if (!kcm->tx_wait) {
714 list_add_tail(&kcm->wait_psock_list,
715 &mux->kcm_tx_waiters);
719 spin_unlock_bh(&mux->lock);
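/* Called with the mux lock held. Hand a newly available psock to the
 * first KCM socket waiting to transmit, or put it on the available
 * list if no one is waiting.
 */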
725 static void psock_now_avail(struct kcm_psock *psock)
727 struct kcm_mux *mux = psock->mux;
728 struct kcm_sock *kcm;
730 if (list_empty(&mux->kcm_tx_waiters)) {
731 list_add_tail(&psock->psock_avail_list,
734 kcm = list_first_entry(&mux->kcm_tx_waiters,
737 list_del(&kcm->wait_psock_list);
738 kcm->tx_wait = false;
741 /* Commit before changing tx_psock since that is read in
742 * reserve_psock before queuing work.
746 kcm->tx_psock = psock;
747 KCM_STATS_INCR(psock->stats.reserved);
748 queue_work(kcm_wq, &kcm->tx_work);
752 /* kcm sock is locked. */
753 static void unreserve_psock(struct kcm_sock *kcm)
755 struct kcm_psock *psock;
756 struct kcm_mux *mux = kcm->mux;
758 spin_lock_bh(&mux->lock);
760 psock = kcm->tx_psock;
762 if (WARN_ON(!psock)) {
763 spin_unlock_bh(&mux->lock);
767 smp_rmb(); /* Read tx_psock before tx_wait */
769 kcm_update_tx_mux_stats(mux, psock);
771 WARN_ON(kcm->tx_wait);
773 kcm->tx_psock = NULL;
774 psock->tx_kcm = NULL;
775 KCM_STATS_INCR(psock->stats.unreserved);
777 if (unlikely(psock->tx_stopped)) {
780 list_del(&psock->psock_list);
783 fput(psock->sk->sk_socket->file);
784 kmem_cache_free(kcm_psockp, psock);
787 /* Don't put back on available list */
789 spin_unlock_bh(&mux->lock);
794 psock_now_avail(psock);
796 spin_unlock_bh(&mux->lock);
799 static void kcm_report_tx_retry(struct kcm_sock *kcm)
801 struct kcm_mux *mux = kcm->mux;
803 spin_lock_bh(&mux->lock);
804 KCM_STATS_INCR(mux->stats.tx_retries);
805 spin_unlock_bh(&mux->lock);
808 /* Write any messages ready on the kcm socket. Called with kcm sock lock
809 * held. Return bytes actually sent or error.
811 static int kcm_write_msgs(struct kcm_sock *kcm)
813 struct sock *sk = &kcm->sk;
814 struct kcm_psock *psock;
815 struct sk_buff *skb, *head;
816 struct kcm_tx_msg *txm;
817 unsigned short fragidx, frag_offset;
818 unsigned int sent, total_sent = 0;
821 kcm->tx_wait_more = false;
822 psock = kcm->tx_psock;
823 if (unlikely(psock && psock->tx_stopped)) {
824 /* A reserved psock was aborted asynchronously. Unreserve
825 * it and we'll retry the message.
827 unreserve_psock(kcm);
828 kcm_report_tx_retry(kcm);
829 if (skb_queue_empty(&sk->sk_write_queue))
832 kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
834 } else if (skb_queue_empty(&sk->sk_write_queue)) {
838 head = skb_peek(&sk->sk_write_queue);
839 txm = kcm_tx_msg(head);
842 /* Send of first skbuff in queue already in progress */
843 if (WARN_ON(!psock)) {
848 frag_offset = txm->frag_offset;
849 fragidx = txm->fragidx;
856 psock = reserve_psock(kcm);
862 txm = kcm_tx_msg(head);
866 if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
871 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
877 frag = &skb_shinfo(skb)->frags[fragidx];
878 if (WARN_ON(!frag->size)) {
883 ret = kernel_sendpage(psock->sk->sk_socket,
885 frag->page_offset + frag_offset,
886 frag->size - frag_offset,
889 if (ret == -EAGAIN) {
890 /* Save state to try again when there's
891 * write space on the socket
894 txm->frag_offset = frag_offset;
895 txm->fragidx = fragidx;
902 /* Hard failure in sending message, abort this
903 * psock since it has lost framing
904 * synchronization and retry sending the
905 * message from the beginning.
907 kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
909 unreserve_psock(kcm);
912 kcm_report_tx_retry(kcm);
920 KCM_STATS_ADD(psock->stats.tx_bytes, ret);
921 if (frag_offset < frag->size) {
922 /* Not finished with this frag */
928 if (skb_has_frag_list(skb)) {
929 skb = skb_shinfo(skb)->frag_list;
932 } else if (skb->next) {
937 /* Successfully sent the whole packet, account for it. */
938 skb_dequeue(&sk->sk_write_queue);
940 sk->sk_wmem_queued -= sent;
942 KCM_STATS_INCR(psock->stats.tx_msgs);
943 } while ((head = skb_peek(&sk->sk_write_queue)));
946 /* Done with all queued messages. */
947 WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
948 unreserve_psock(kcm);
951 /* Check if write space is available */
952 sk->sk_write_space(sk);
954 return total_sent ? : ret;
957 static void kcm_tx_work(struct work_struct *w)
959 struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
960 struct sock *sk = &kcm->sk;
965 /* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx aborts. */
968 err = kcm_write_msgs(kcm);
970 /* Hard failure in write, report error on KCM socket */
971 pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
972 report_csk_error(&kcm->sk, -err);
976 /* Primarily for SOCK_SEQPACKET sockets */
977 if (likely(sk->sk_socket) &&
978 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
979 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
980 sk->sk_write_space(sk);
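/* Flush a message that was held back by MSG_BATCH (tx_wait_more). */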
987 static void kcm_push(struct kcm_sock *kcm)
989 if (kcm->tx_wait_more)
993 static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
995 struct sock *sk = sock->sk;
996 struct kcm_sock *kcm = kcm_sk(sk);
997 struct sk_buff *skb = NULL, *head = NULL;
998 size_t copy, copied = 0;
999 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1000 int eor = (sock->type == SOCK_DGRAM) ?
1001 !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
1006 /* Per tcp_sendmsg this should be in poll */
1007 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1013 /* Previously opened message */
1014 head = kcm->seq_skb;
1015 skb = kcm_tx_msg(head)->last_skb;
1019 /* Call the sk_stream functions to manage the sndbuf mem. */
1020 if (!sk_stream_memory_free(sk)) {
1022 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1023 err = sk_stream_wait_memory(sk, &timeo);
1028 /* New message, alloc head skb */
1029 head = alloc_skb(0, sk->sk_allocation);
1032 err = sk_stream_wait_memory(sk, &timeo);
1036 head = alloc_skb(0, sk->sk_allocation);
1041 /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
1042 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
1044 skb->ip_summed = CHECKSUM_UNNECESSARY;
1047 while (msg_data_left(msg)) {
1049 int i = skb_shinfo(skb)->nr_frags;
1050 struct page_frag *pfrag = sk_page_frag(sk);
1052 if (!sk_page_frag_refill(sk, pfrag))
1053 goto wait_for_memory;
1055 if (!skb_can_coalesce(skb, i, pfrag->page,
1057 if (i == MAX_SKB_FRAGS) {
1058 struct sk_buff *tskb;
1060 tskb = alloc_skb(0, sk->sk_allocation);
1062 goto wait_for_memory;
1065 skb_shinfo(head)->frag_list = tskb;
1070 skb->ip_summed = CHECKSUM_UNNECESSARY;
1076 copy = min_t(int, msg_data_left(msg),
1077 pfrag->size - pfrag->offset);
1079 if (!sk_wmem_schedule(sk, copy))
1080 goto wait_for_memory;
1082 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
1089 /* Update the skb. */
1091 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1093 skb_fill_page_desc(skb, i, pfrag->page,
1094 pfrag->offset, copy);
1095 get_page(pfrag->page);
1098 pfrag->offset += copy;
1102 head->data_len += copy;
1109 err = sk_stream_wait_memory(sk, &timeo);
1115 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1117 /* Message complete, queue it on send buffer */
1118 __skb_queue_tail(&sk->sk_write_queue, head);
1119 kcm->seq_skb = NULL;
1120 KCM_STATS_INCR(kcm->stats.tx_msgs);
1122 if (msg->msg_flags & MSG_BATCH) {
1123 kcm->tx_wait_more = true;
1124 } else if (kcm->tx_wait_more || not_busy) {
1125 err = kcm_write_msgs(kcm);
1127 /* We got a hard error in write_msgs but have
1128 * already queued this message. Report an error
1129 * in the socket, but don't affect return value
1132 pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1133 report_csk_error(&kcm->sk, -err);
1137 /* Message not complete, save state */
1139 kcm->seq_skb = head;
1140 kcm_tx_msg(head)->last_skb = skb;
1143 KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
1151 if (copied && sock->type == SOCK_SEQPACKET) {
1152 /* Wrote some bytes before encountering an
1153 * error, return partial success.
1155 goto partial_message;
1158 if (head != kcm->seq_skb)
1161 err = sk_stream_error(sk, msg->msg_flags, err);
1163 /* make sure we wake any epoll edge trigger waiter */
1164 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1165 sk->sk_write_space(sk);
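/* Wait until a complete message is available on the receive queue,
 * honoring MSG_DONTWAIT and the socket's receive timeout.
 */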
1171 static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
1172 long timeo, int *err)
1174 struct sk_buff *skb;
1176 while (!(skb = skb_peek(&sk->sk_receive_queue))) {
1178 *err = sock_error(sk);
1182 if (sock_flag(sk, SOCK_DONE))
1185 if ((flags & MSG_DONTWAIT) || !timeo) {
1190 sk_wait_data(sk, &timeo, NULL);
1192 /* Handle signals */
1193 if (signal_pending(current)) {
1194 *err = sock_intr_errno(timeo);
1202 static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
1203 size_t len, int flags)
1205 struct sock *sk = sock->sk;
1206 struct kcm_sock *kcm = kcm_sk(sk);
1209 struct kcm_rx_msg *rxm;
1211 struct sk_buff *skb;
1213 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1217 skb = kcm_wait_data(sk, flags, timeo, &err);
1221 /* Okay, have a message on the receive queue */
1223 rxm = kcm_rx_msg(skb);
1225 if (len > rxm->full_len)
1226 len = rxm->full_len;
1228 err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
1233 if (likely(!(flags & MSG_PEEK))) {
1234 KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1235 if (copied < rxm->full_len) {
1236 if (sock->type == SOCK_DGRAM) {
1237 /* Truncated message */
1238 msg->msg_flags |= MSG_TRUNC;
1241 rxm->offset += copied;
1242 rxm->full_len -= copied;
1245 /* Finished with message */
1246 msg->msg_flags |= MSG_EOR;
1247 KCM_STATS_INCR(kcm->stats.rx_msgs);
1248 skb_unlink(skb, &sk->sk_receive_queue);
1256 return copied ? : err;
1259 /* kcm sock lock held */
1260 static void kcm_recv_disable(struct kcm_sock *kcm)
1262 struct kcm_mux *mux = kcm->mux;
1264 if (kcm->rx_disabled)
1267 spin_lock_bh(&mux->rx_lock);
1269 kcm->rx_disabled = 1;
1271 /* If a psock is reserved we'll do cleanup in unreserve */
1272 if (!kcm->rx_psock) {
1274 list_del(&kcm->wait_rx_list);
1275 kcm->rx_wait = false;
1278 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1281 spin_unlock_bh(&mux->rx_lock);
1284 /* kcm sock lock held */
1285 static void kcm_recv_enable(struct kcm_sock *kcm)
1287 struct kcm_mux *mux = kcm->mux;
1289 if (!kcm->rx_disabled)
1292 spin_lock_bh(&mux->rx_lock);
1294 kcm->rx_disabled = 0;
1297 spin_unlock_bh(&mux->rx_lock);
1300 static int kcm_setsockopt(struct socket *sock, int level, int optname,
1301 char __user *optval, unsigned int optlen)
1303 struct kcm_sock *kcm = kcm_sk(sock->sk);
1307 if (level != SOL_KCM)
1308 return -ENOPROTOOPT;
1310 if (optlen < sizeof(int))
1313 if (get_user(val, (int __user *)optval))
1316 valbool = val ? 1 : 0;
1319 case KCM_RECV_DISABLE:
1320 lock_sock(&kcm->sk);
1322 kcm_recv_disable(kcm);
1324 kcm_recv_enable(kcm);
1325 release_sock(&kcm->sk);
1334 static int kcm_getsockopt(struct socket *sock, int level, int optname,
1335 char __user *optval, int __user *optlen)
1337 struct kcm_sock *kcm = kcm_sk(sock->sk);
1340 if (level != SOL_KCM)
1341 return -ENOPROTOOPT;
1343 if (get_user(len, optlen))
1346 len = min_t(unsigned int, len, sizeof(int));
1351 case KCM_RECV_DISABLE:
1352 val = kcm->rx_disabled;
1355 return -ENOPROTOOPT;
1358 if (put_user(len, optlen))
1360 if (copy_to_user(optval, &val, len))
1365 static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1367 struct kcm_sock *tkcm;
1368 struct list_head *head;
1371 /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1372 * we set sk_state, otherwise epoll_wait always returns right away with POLLHUP. */
1375 kcm->sk.sk_state = TCP_ESTABLISHED;
1377 /* Add to mux's kcm sockets list */
1379 spin_lock_bh(&mux->lock);
1381 head = &mux->kcm_socks;
1382 list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1383 if (tkcm->index != index)
1385 head = &tkcm->kcm_sock_list;
1389 list_add(&kcm->kcm_sock_list, head);
1392 mux->kcm_socks_cnt++;
1393 spin_unlock_bh(&mux->lock);
1395 INIT_WORK(&kcm->tx_work, kcm_tx_work);
1397 spin_lock_bh(&mux->rx_lock);
1399 spin_unlock_bh(&mux->rx_lock);
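/* Attach a TCP socket to the mux: allocate a psock, install the BPF
 * framing program, take over the TCP socket's callbacks, and make the
 * psock available for transmit and receive.
 */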
1402 static int kcm_attach(struct socket *sock, struct socket *csock,
1403 struct bpf_prog *prog)
1405 struct kcm_sock *kcm = kcm_sk(sock->sk);
1406 struct kcm_mux *mux = kcm->mux;
1408 struct kcm_psock *psock = NULL, *tpsock;
1409 struct list_head *head;
1412 if (csock->ops->family != PF_INET &&
1413 csock->ops->family != PF_INET6)
1420 /* Only support TCP for now */
1421 if (csk->sk_protocol != IPPROTO_TCP)
1424 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1430 psock->bpf_prog = prog;
1431 INIT_WORK(&psock->rx_work, psock_rx_work);
1432 INIT_DELAYED_WORK(&psock->rx_delayed_work, psock_rx_delayed_work);
1436 write_lock_bh(&csk->sk_callback_lock);
1437 psock->save_data_ready = csk->sk_data_ready;
1438 psock->save_write_space = csk->sk_write_space;
1439 psock->save_state_change = csk->sk_state_change;
1440 csk->sk_user_data = psock;
1441 csk->sk_data_ready = psock_tcp_data_ready;
1442 csk->sk_write_space = psock_tcp_write_space;
1443 csk->sk_state_change = psock_tcp_state_change;
1444 write_unlock_bh(&csk->sk_callback_lock);
1446 /* Finished initialization, now add the psock to the MUX. */
1447 spin_lock_bh(&mux->lock);
1448 head = &mux->psocks;
1449 list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1450 if (tpsock->index != index)
1452 head = &tpsock->psock_list;
1456 list_add(&psock->psock_list, head);
1457 psock->index = index;
1459 KCM_STATS_INCR(mux->stats.psock_attach);
1461 psock_now_avail(psock);
1462 spin_unlock_bh(&mux->lock);
1464 /* Schedule RX work in case there are already bytes queued */
1465 queue_work(kcm_wq, &psock->rx_work);
1470 static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1472 struct socket *csock;
1473 struct bpf_prog *prog;
1476 csock = sockfd_lookup(info->fd, &err);
1480 prog = bpf_prog_get(info->bpf_fd);
1482 err = PTR_ERR(prog);
1486 if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
1492 err = kcm_attach(sock, csock, prog);
1498 /* Keep reference on file also */
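/* Detach a psock from its lower TCP socket: restore the saved
 * callbacks, stop RX and TX activity, and free the psock once it is
 * no longer reserved by a KCM socket.
 */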
1506 static void kcm_unattach(struct kcm_psock *psock)
1508 struct sock *csk = psock->sk;
1509 struct kcm_mux *mux = psock->mux;
1511 /* Stop getting callbacks from TCP socket. After this there should
1512 * be no way to reserve a kcm for this psock.
1514 write_lock_bh(&csk->sk_callback_lock);
1515 csk->sk_user_data = NULL;
1516 csk->sk_data_ready = psock->save_data_ready;
1517 csk->sk_write_space = psock->save_write_space;
1518 csk->sk_state_change = psock->save_state_change;
1519 psock->rx_stopped = 1;
1521 if (WARN_ON(psock->rx_kcm)) {
1522 write_unlock_bh(&csk->sk_callback_lock);
1526 spin_lock_bh(&mux->rx_lock);
1528 /* Stop receiver activities. After this point psock should not be
1529 * able to get onto ready list either through callbacks or work.
1531 if (psock->ready_rx_msg) {
1532 list_del(&psock->psock_ready_list);
1533 kfree_skb(psock->ready_rx_msg);
1534 psock->ready_rx_msg = NULL;
1535 KCM_STATS_INCR(mux->stats.rx_ready_drops);
1538 spin_unlock_bh(&mux->rx_lock);
1540 write_unlock_bh(&csk->sk_callback_lock);
1542 cancel_work_sync(&psock->rx_work);
1543 cancel_delayed_work_sync(&psock->rx_delayed_work);
1545 bpf_prog_put(psock->bpf_prog);
1547 kfree_skb(psock->rx_skb_head);
1548 psock->rx_skb_head = NULL;
1550 spin_lock_bh(&mux->lock);
1552 aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1554 KCM_STATS_INCR(mux->stats.psock_unattach);
1556 if (psock->tx_kcm) {
1557 /* psock was reserved. Just mark it finished and we will clean
1558 * up in the kcm paths, we need kcm lock which can not be
1561 KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1562 spin_unlock_bh(&mux->lock);
1564 /* We are unattaching a socket that is reserved. Abort the
1565 * socket since we may be out of sync in sending on it. We need
1566 * to do this without the mux lock.
1568 kcm_abort_tx_psock(psock, EPIPE, false);
1570 spin_lock_bh(&mux->lock);
1571 if (!psock->tx_kcm) {
1572 /* psock was unreserved in the window where the mux lock was dropped */
1577 /* Commit done before queuing work to process it */
1580 /* Queue tx work to make sure psock->done is handled */
1581 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1582 spin_unlock_bh(&mux->lock);
1585 if (!psock->tx_stopped)
1586 list_del(&psock->psock_avail_list);
1587 list_del(&psock->psock_list);
1589 spin_unlock_bh(&mux->lock);
1592 fput(csk->sk_socket->file);
1593 kmem_cache_free(kcm_psockp, psock);
1597 static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1599 struct kcm_sock *kcm = kcm_sk(sock->sk);
1600 struct kcm_mux *mux = kcm->mux;
1601 struct kcm_psock *psock;
1602 struct socket *csock;
1606 csock = sockfd_lookup(info->fd, &err);
1618 spin_lock_bh(&mux->lock);
1620 list_for_each_entry(psock, &mux->psocks, psock_list) {
1621 if (psock->sk != csk)
1624 /* Found the matching psock */
1626 if (psock->unattaching || WARN_ON(psock->done)) {
1631 psock->unattaching = 1;
1633 spin_unlock_bh(&mux->lock);
1635 kcm_unattach(psock);
1641 spin_unlock_bh(&mux->lock);
1648 static struct proto kcm_proto = {
1650 .owner = THIS_MODULE,
1651 .obj_size = sizeof(struct kcm_sock),
1654 /* Clone a kcm socket. */
1655 static int kcm_clone(struct socket *osock, struct kcm_clone *info,
1656 struct socket **newsockp)
1658 struct socket *newsock;
1660 struct file *newfile;
1664 newsock = sock_alloc();
1668 newsock->type = osock->type;
1669 newsock->ops = osock->ops;
1671 __module_get(newsock->ops->owner);
1673 newfd = get_unused_fd_flags(0);
1674 if (unlikely(newfd < 0)) {
1679 newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1680 if (unlikely(IS_ERR(newfile))) {
1681 err = PTR_ERR(newfile);
1682 goto out_sock_alloc_fail;
1685 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1689 goto out_sk_alloc_fail;
1692 sock_init_data(newsock, newsk);
1693 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1695 fd_install(newfd, newfile);
1696 *newsockp = newsock;
1703 out_sock_alloc_fail:
1704 put_unused_fd(newfd);
1706 sock_release(newsock);
1711 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1716 case SIOCKCMATTACH: {
1717 struct kcm_attach info;
1719 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1722 err = kcm_attach_ioctl(sock, &info);
1726 case SIOCKCMUNATTACH: {
1727 struct kcm_unattach info;
1729 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1732 err = kcm_unattach_ioctl(sock, &info);
1736 case SIOCKCMCLONE: {
1737 struct kcm_clone info;
1738 struct socket *newsock = NULL;
1740 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1743 err = kcm_clone(sock, &info, &newsock);
1746 if (copy_to_user((void __user *)arg, &info,
1749 sock_release(newsock);
1763 static void free_mux(struct rcu_head *rcu)
1765 struct kcm_mux *mux = container_of(rcu,
1766 struct kcm_mux, rcu);
1768 kmem_cache_free(kcm_muxp, mux);
1771 static void release_mux(struct kcm_mux *mux)
1773 struct kcm_net *knet = mux->knet;
1774 struct kcm_psock *psock, *tmp_psock;
1776 /* Release psocks */
1777 list_for_each_entry_safe(psock, tmp_psock,
1778 &mux->psocks, psock_list) {
1779 if (!WARN_ON(psock->unattaching))
1780 kcm_unattach(psock);
1783 if (WARN_ON(mux->psocks_cnt))
1786 __skb_queue_purge(&mux->rx_hold_queue);
1788 mutex_lock(&knet->mutex);
1789 aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1790 aggregate_psock_stats(&mux->aggregate_psock_stats,
1791 &knet->aggregate_psock_stats);
1792 list_del_rcu(&mux->kcm_mux_list);
1794 mutex_unlock(&knet->mutex);
1796 call_rcu(&mux->rcu, free_mux);
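/* Final teardown of a KCM socket: remove it from the mux's RX state,
 * requeue any undelivered messages to other KCM sockets, and release
 * the mux if this was its last socket.
 */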
1799 static void kcm_done(struct kcm_sock *kcm)
1801 struct kcm_mux *mux = kcm->mux;
1802 struct sock *sk = &kcm->sk;
1805 spin_lock_bh(&mux->rx_lock);
1806 if (kcm->rx_psock) {
1807 /* Cleanup in unreserve_rx_kcm */
1809 kcm->rx_disabled = 1;
1811 spin_unlock_bh(&mux->rx_lock);
1816 list_del(&kcm->wait_rx_list);
1817 kcm->rx_wait = false;
1819 /* Move any pending receive messages to other kcm sockets */
1820 requeue_rx_msgs(mux, &sk->sk_receive_queue);
1822 spin_unlock_bh(&mux->rx_lock);
1824 if (WARN_ON(sk_rmem_alloc_get(sk)))
1827 /* Detach from MUX */
1828 spin_lock_bh(&mux->lock);
1830 list_del(&kcm->kcm_sock_list);
1831 mux->kcm_socks_cnt--;
1832 socks_cnt = mux->kcm_socks_cnt;
1834 spin_unlock_bh(&mux->lock);
1837 /* We are done with the mux now. */
1841 WARN_ON(kcm->rx_wait);
1846 /* Called by kcm_release to close a KCM socket.
1847 * If this is the last KCM socket on the MUX, destroy the MUX.
1849 static int kcm_release(struct socket *sock)
1851 struct sock *sk = sock->sk;
1852 struct kcm_sock *kcm;
1853 struct kcm_mux *mux;
1854 struct kcm_psock *psock;
1863 kfree_skb(kcm->seq_skb);
1866 /* Purge queue under lock to avoid race condition with tx_work trying
1867 * to act when queue is nonempty. If tx_work runs after this point
1868 * it will just return.
1870 __skb_queue_purge(&sk->sk_write_queue);
1873 spin_lock_bh(&mux->lock);
1875 /* Take off the tx_wait list; after this point there should be no way
1876 * that a psock will be assigned to this kcm.
1878 list_del(&kcm->wait_psock_list);
1879 kcm->tx_wait = false;
1881 spin_unlock_bh(&mux->lock);
1883 /* Cancel work. After this point there should be no outside references
1884 * to the kcm socket.
1886 cancel_work_sync(&kcm->tx_work);
1889 psock = kcm->tx_psock;
1891 /* A psock was reserved, so we need to kill it since it
1892 * may already have some bytes queued from a message. We
1893 * need to do this after removing kcm from tx_wait list.
1895 kcm_abort_tx_psock(psock, EPIPE, false);
1896 unreserve_psock(kcm);
1900 WARN_ON(kcm->tx_wait);
1901 WARN_ON(kcm->tx_psock);
1910 static const struct proto_ops kcm_ops = {
1912 .owner = THIS_MODULE,
1913 .release = kcm_release,
1914 .bind = sock_no_bind,
1915 .connect = sock_no_connect,
1916 .socketpair = sock_no_socketpair,
1917 .accept = sock_no_accept,
1918 .getname = sock_no_getname,
1919 .poll = datagram_poll,
1921 .listen = sock_no_listen,
1922 .shutdown = sock_no_shutdown,
1923 .setsockopt = kcm_setsockopt,
1924 .getsockopt = kcm_getsockopt,
1925 .sendmsg = kcm_sendmsg,
1926 .recvmsg = kcm_recvmsg,
1927 .mmap = sock_no_mmap,
1928 .sendpage = sock_no_sendpage,
1931 /* Create proto operation for kcm sockets */
1932 static int kcm_create(struct net *net, struct socket *sock,
1933 int protocol, int kern)
1935 struct kcm_net *knet = net_generic(net, kcm_net_id);
1937 struct kcm_mux *mux;
1939 switch (sock->type) {
1941 case SOCK_SEQPACKET:
1942 sock->ops = &kcm_ops;
1945 return -ESOCKTNOSUPPORT;
1948 if (protocol != KCMPROTO_CONNECTED)
1949 return -EPROTONOSUPPORT;
1951 sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1955 /* Allocate a kcm mux, shared between KCM sockets */
1956 mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1962 spin_lock_init(&mux->lock);
1963 spin_lock_init(&mux->rx_lock);
1964 INIT_LIST_HEAD(&mux->kcm_socks);
1965 INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1966 INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1968 INIT_LIST_HEAD(&mux->psocks);
1969 INIT_LIST_HEAD(&mux->psocks_ready);
1970 INIT_LIST_HEAD(&mux->psocks_avail);
1974 /* Add new MUX to list */
1975 mutex_lock(&knet->mutex);
1976 list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1978 mutex_unlock(&knet->mutex);
1980 skb_queue_head_init(&mux->rx_hold_queue);
1982 /* Init KCM socket */
1983 sock_init_data(sock, sk);
1984 init_kcm_sock(kcm_sk(sk), mux);
1989 static struct net_proto_family kcm_family_ops = {
1991 .create = kcm_create,
1992 .owner = THIS_MODULE,
1995 static __net_init int kcm_init_net(struct net *net)
1997 struct kcm_net *knet = net_generic(net, kcm_net_id);
1999 INIT_LIST_HEAD_RCU(&knet->mux_list);
2000 mutex_init(&knet->mutex);
2005 static __net_exit void kcm_exit_net(struct net *net)
2007 struct kcm_net *knet = net_generic(net, kcm_net_id);
2009 /* All KCM sockets should be closed at this point, which should mean
2010 * that all multiplexors and psocks have been destroyed.
2012 WARN_ON(!list_empty(&knet->mux_list));
2015 static struct pernet_operations kcm_net_ops = {
2016 .init = kcm_init_net,
2017 .exit = kcm_exit_net,
2019 .size = sizeof(struct kcm_net),
2022 static int __init kcm_init(void)
2026 kcm_muxp = kmem_cache_create("kcm_mux_cache",
2027 sizeof(struct kcm_mux), 0,
2028 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2032 kcm_psockp = kmem_cache_create("kcm_psock_cache",
2033 sizeof(struct kcm_psock), 0,
2034 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2038 kcm_wq = create_singlethread_workqueue("kkcmd");
2042 err = proto_register(&kcm_proto, 1);
2046 err = sock_register(&kcm_family_ops);
2048 goto sock_register_fail;
2050 err = register_pernet_device(&kcm_net_ops);
2054 err = kcm_proc_init();
2056 goto proc_init_fail;
2061 unregister_pernet_device(&kcm_net_ops);
2064 sock_unregister(PF_KCM);
2067 proto_unregister(&kcm_proto);
2070 kmem_cache_destroy(kcm_muxp);
2071 kmem_cache_destroy(kcm_psockp);
2074 destroy_workqueue(kcm_wq);
2079 static void __exit kcm_exit(void)
2082 unregister_pernet_device(&kcm_net_ops);
2083 sock_unregister(PF_KCM);
2084 proto_unregister(&kcm_proto);
2085 destroy_workqueue(kcm_wq);
2087 kmem_cache_destroy(kcm_muxp);
2088 kmem_cache_destroy(kcm_psockp);
2091 module_init(kcm_init);
2092 module_exit(kcm_exit);
2094 MODULE_LICENSE("GPL");
2095 MODULE_ALIAS_NETPROTO(PF_KCM);