/* net/kcm/kcmsock.c */

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <uapi/linux/kcm.h>

unsigned int kcm_net_id;

static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;

static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
        return (struct kcm_sock *)sk;
}

static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
        return (struct kcm_tx_msg *)skb->cb;
}

static inline struct kcm_rx_msg *kcm_rx_msg(struct sk_buff *skb)
{
        return (struct kcm_rx_msg *)((void *)skb->cb +
                                     offsetof(struct qdisc_skb_cb, data));
}

static void report_csk_error(struct sock *csk, int err)
{
        csk->sk_err = EPIPE;
        csk->sk_error_report(csk);
}

/* Callback lock held */
static void kcm_abort_rx_psock(struct kcm_psock *psock, int err,
                               struct sk_buff *skb)
{
        struct sock *csk = psock->sk;

        /* Unrecoverable error in receive */

        if (psock->rx_stopped)
                return;

        psock->rx_stopped = 1;
        KCM_STATS_INCR(psock->stats.rx_aborts);

        /* Report an error on the lower socket */
        report_csk_error(csk, err);
}

static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
                               bool wakeup_kcm)
{
        struct sock *csk = psock->sk;
        struct kcm_mux *mux = psock->mux;

        /* Unrecoverable error in transmit */

        spin_lock_bh(&mux->lock);

        if (psock->tx_stopped) {
                spin_unlock_bh(&mux->lock);
                return;
        }

        psock->tx_stopped = 1;
        KCM_STATS_INCR(psock->stats.tx_aborts);

        if (!psock->tx_kcm) {
                /* Take off psocks_avail list */
                list_del(&psock->psock_avail_list);
        } else if (wakeup_kcm) {
                /* In this case psock is being aborted while outside of
                 * write_msgs and psock is reserved. Schedule tx_work
                 * to handle the failure there. Need to commit tx_stopped
                 * before queuing work.
                 */
                smp_mb();

                queue_work(kcm_wq, &psock->tx_kcm->tx_work);
        }

        spin_unlock_bh(&mux->lock);

        /* Report error on lower socket */
        report_csk_error(csk, err);
}

/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
                                    struct kcm_psock *psock)
{
        KCM_STATS_ADD(mux->stats.rx_bytes,
                      psock->stats.rx_bytes - psock->saved_rx_bytes);
        mux->stats.rx_msgs +=
                psock->stats.rx_msgs - psock->saved_rx_msgs;
        psock->saved_rx_msgs = psock->stats.rx_msgs;
        psock->saved_rx_bytes = psock->stats.rx_bytes;
}

static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
                                    struct kcm_psock *psock)
{
        KCM_STATS_ADD(mux->stats.tx_bytes,
                      psock->stats.tx_bytes - psock->saved_tx_bytes);
        mux->stats.tx_msgs +=
                psock->stats.tx_msgs - psock->saved_tx_msgs;
        psock->saved_tx_msgs = psock->stats.tx_msgs;
        psock->saved_tx_bytes = psock->stats.tx_bytes;
}

static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* KCM is ready to receive messages on its queue -- either the KCM is new or
 * has become unblocked after being blocked on full socket buffer. Queue any
 * pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;
        struct kcm_psock *psock;
        struct sk_buff *skb;

        if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
                return;

        while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
                if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
                        /* Assuming buffer limit has been reached */
                        skb_queue_head(&mux->rx_hold_queue, skb);
                        WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
                        return;
                }
        }

        while (!list_empty(&mux->psocks_ready)) {
                psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
                                         psock_ready_list);

                if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
                        /* Assuming buffer limit has been reached */
                        WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
                        return;
                }

                /* Consumed the ready message on the psock. Schedule rx_work to
                 * get more messages.
                 */
                list_del(&psock->psock_ready_list);
                psock->ready_rx_msg = NULL;

                /* Commit clearing of ready_rx_msg for queuing work */
                smp_mb();

                queue_work(kcm_wq, &psock->rx_work);
        }

        /* Buffer limit is okay now, add to ready list */
        list_add_tail(&kcm->wait_rx_list,
                      &kcm->mux->kcm_rx_waiters);
        kcm->rx_wait = true;
}

static void kcm_rfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        struct kcm_mux *mux = kcm->mux;
        unsigned int len = skb->truesize;

        sk_mem_uncharge(sk, len);
        atomic_sub(len, &sk->sk_rmem_alloc);

        /* For reading rx_wait and rx_psock without holding lock */
        smp_mb__after_atomic();

        if (!kcm->rx_wait && !kcm->rx_psock &&
            sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
                spin_lock_bh(&mux->rx_lock);
                kcm_rcv_ready(kcm);
                spin_unlock_bh(&mux->rx_lock);
        }
}

static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff_head *list = &sk->sk_receive_queue;

        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                return -ENOMEM;

        if (!sk_rmem_schedule(sk, skb, skb->truesize))
                return -ENOBUFS;

        skb->dev = NULL;

        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = kcm_rfree;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, skb->truesize);

        skb_queue_tail(list, skb);

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk);

        return 0;
}

/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
        struct sk_buff *skb;
        struct kcm_sock *kcm;

        while ((skb = __skb_dequeue(head))) {
                /* Reset destructor to avoid calling kcm_rcv_ready */
                skb->destructor = sock_rfree;
                skb_orphan(skb);
try_again:
                if (list_empty(&mux->kcm_rx_waiters)) {
                        skb_queue_tail(&mux->rx_hold_queue, skb);
                        continue;
                }

                kcm = list_first_entry(&mux->kcm_rx_waiters,
                                       struct kcm_sock, wait_rx_list);

                if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
                        /* Should mean socket buffer full */
                        list_del(&kcm->wait_rx_list);
                        kcm->rx_wait = false;

                        /* Commit rx_wait to read in kcm_free */
                        smp_wmb();

                        goto try_again;
                }
        }
}

/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
                                       struct sk_buff *head)
{
        struct kcm_mux *mux = psock->mux;
        struct kcm_sock *kcm;

        WARN_ON(psock->ready_rx_msg);

        if (psock->rx_kcm)
                return psock->rx_kcm;

        spin_lock_bh(&mux->rx_lock);

        if (psock->rx_kcm) {
                spin_unlock_bh(&mux->rx_lock);
                return psock->rx_kcm;
        }

        kcm_update_rx_mux_stats(mux, psock);

        if (list_empty(&mux->kcm_rx_waiters)) {
                psock->ready_rx_msg = head;
                list_add_tail(&psock->psock_ready_list,
                              &mux->psocks_ready);
                spin_unlock_bh(&mux->rx_lock);
                return NULL;
        }

        kcm = list_first_entry(&mux->kcm_rx_waiters,
                               struct kcm_sock, wait_rx_list);
        list_del(&kcm->wait_rx_list);
        kcm->rx_wait = false;

        psock->rx_kcm = kcm;
        kcm->rx_psock = psock;

        spin_unlock_bh(&mux->rx_lock);

        return kcm;
}

static void kcm_done(struct kcm_sock *kcm);

static void kcm_done_work(struct work_struct *w)
{
        kcm_done(container_of(w, struct kcm_sock, done_work));
}

/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
                             bool rcv_ready)
{
        struct kcm_sock *kcm = psock->rx_kcm;
        struct kcm_mux *mux = psock->mux;

        if (!kcm)
                return;

        spin_lock_bh(&mux->rx_lock);

        psock->rx_kcm = NULL;
        kcm->rx_psock = NULL;

        /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
         * kcm_rfree
         */
        smp_mb();

        if (unlikely(kcm->done)) {
                spin_unlock_bh(&mux->rx_lock);

                /* Need to run kcm_done in a task since we need to acquire
                 * callback locks which may already be held here.
                 */
                INIT_WORK(&kcm->done_work, kcm_done_work);
                schedule_work(&kcm->done_work);
                return;
        }

        if (unlikely(kcm->rx_disabled)) {
                requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
        } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
                /* Check for degenerative race with rx_wait that all
                 * data was dequeued (accounted for in kcm_rfree).
                 */
                kcm_rcv_ready(kcm);
        }
        spin_unlock_bh(&mux->rx_lock);
}

/* Macro to invoke filter function. */
#define KCM_RUN_FILTER(prog, ctx) \
        (*prog->bpf_func)(ctx, prog->insnsi)

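/* The psock's attached program is an ordinary socket-filter BPF program whose
 * return value kcm_tcp_recv() below interprets as the full length of the
 * message at the head of the skb: zero means more header bytes are needed,
 * and a length that does not reach into the newly received data is a protocol
 * error. As an illustrative sketch only (not part of this file), a parser for
 * a 2-byte big-endian length header that does not count itself could be:
 *
 *	int length_parser(struct __sk_buff *skb)
 *	{
 *		return load_half(skb, 0) + 2;
 *	}
 *
 * where load_half() is the classic BPF sample helper for BPF_LD | BPF_H |
 * BPF_ABS loads.
 */
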
/* Lower socket lock held */
static int kcm_tcp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                        unsigned int orig_offset, size_t orig_len)
{
        struct kcm_psock *psock = (struct kcm_psock *)desc->arg.data;
        struct kcm_rx_msg *rxm;
        struct kcm_sock *kcm;
        struct sk_buff *head, *skb;
        size_t eaten = 0, cand_len;
        ssize_t extra;
        int err;
        bool cloned_orig = false;

        if (psock->ready_rx_msg)
                return 0;

        head = psock->rx_skb_head;
        if (head) {
                /* Message already in progress */

                if (unlikely(orig_offset)) {
                        /* Getting data with a non-zero offset when a message is
                         * in progress is not expected. If it does happen, we
                         * need to clone and pull since we can't deal with
                         * offsets in the skbs for a message except in the head.
                         */
                        orig_skb = skb_clone(orig_skb, GFP_ATOMIC);
                        if (!orig_skb) {
                                KCM_STATS_INCR(psock->stats.rx_mem_fail);
                                desc->error = -ENOMEM;
                                return 0;
                        }
                        if (!pskb_pull(orig_skb, orig_offset)) {
                                KCM_STATS_INCR(psock->stats.rx_mem_fail);
                                kfree_skb(orig_skb);
                                desc->error = -ENOMEM;
                                return 0;
                        }
                        cloned_orig = true;
                        orig_offset = 0;
                }

                if (!psock->rx_skb_nextp) {
                        /* We are going to append to the frag_list of head.
                         * Need to unshare the frag_list.
                         */
                        err = skb_unclone(head, GFP_ATOMIC);
                        if (err) {
                                KCM_STATS_INCR(psock->stats.rx_mem_fail);
                                desc->error = err;
                                return 0;
                        }

                        if (unlikely(skb_shinfo(head)->frag_list)) {
                                /* We can't append to an sk_buff that already
                                 * has a frag_list. We create a new head, point
                                 * the frag_list of that to the old head, and
                                 * then are able to use the old head->next for
                                 * appending to the message.
                                 */
                                if (WARN_ON(head->next)) {
                                        desc->error = -EINVAL;
                                        return 0;
                                }

                                skb = alloc_skb(0, GFP_ATOMIC);
                                if (!skb) {
                                        KCM_STATS_INCR(psock->stats.rx_mem_fail);
                                        desc->error = -ENOMEM;
                                        return 0;
                                }
                                skb->len = head->len;
                                skb->data_len = head->len;
                                skb->truesize = head->truesize;
                                *kcm_rx_msg(skb) = *kcm_rx_msg(head);
                                psock->rx_skb_nextp = &head->next;
                                skb_shinfo(skb)->frag_list = head;
                                psock->rx_skb_head = skb;
                                head = skb;
                        } else {
                                psock->rx_skb_nextp =
                                    &skb_shinfo(head)->frag_list;
                        }
                }
        }

        while (eaten < orig_len) {
                /* Always clone since we will consume something */
                skb = skb_clone(orig_skb, GFP_ATOMIC);
                if (!skb) {
                        KCM_STATS_INCR(psock->stats.rx_mem_fail);
                        desc->error = -ENOMEM;
                        break;
                }

                cand_len = orig_len - eaten;

                head = psock->rx_skb_head;
                if (!head) {
                        head = skb;
                        psock->rx_skb_head = head;
                        /* Will set rx_skb_nextp on next packet if needed */
                        psock->rx_skb_nextp = NULL;
                        rxm = kcm_rx_msg(head);
                        memset(rxm, 0, sizeof(*rxm));
                        rxm->offset = orig_offset + eaten;
                } else {
                        /* Unclone since we may be appending to an skb that we
                         * already share a frag_list with.
                         */
                        err = skb_unclone(skb, GFP_ATOMIC);
                        if (err) {
                                KCM_STATS_INCR(psock->stats.rx_mem_fail);
                                desc->error = err;
                                break;
                        }

                        rxm = kcm_rx_msg(head);
                        *psock->rx_skb_nextp = skb;
                        psock->rx_skb_nextp = &skb->next;
                        head->data_len += skb->len;
                        head->len += skb->len;
                        head->truesize += skb->truesize;
                }

                if (!rxm->full_len) {
                        ssize_t len;

                        len = KCM_RUN_FILTER(psock->bpf_prog, head);

                        if (!len) {
                                /* Need more header to determine length */
                                rxm->accum_len += cand_len;
                                eaten += cand_len;
                                KCM_STATS_INCR(psock->stats.rx_need_more_hdr);
                                WARN_ON(eaten != orig_len);
                                break;
                        } else if (len <= (ssize_t)head->len -
                                          skb->len - rxm->offset) {
                                /* Length must be into new skb (and also
                                 * greater than zero)
                                 */
                                KCM_STATS_INCR(psock->stats.rx_bad_hdr_len);
                                desc->error = -EPROTO;
                                psock->rx_skb_head = NULL;
                                kcm_abort_rx_psock(psock, EPROTO, head);
                                break;
                        }

                        rxm->full_len = len;
                }

                extra = (ssize_t)(rxm->accum_len + cand_len) - rxm->full_len;

                if (extra < 0) {
                        /* Message not complete yet. */
                        rxm->accum_len += cand_len;
                        eaten += cand_len;
                        WARN_ON(eaten != orig_len);
                        break;
                }

                /* Positive extra indicates more bytes than needed for the
                 * message
                 */

                WARN_ON(extra > cand_len);

                eaten += (cand_len - extra);

                /* Hurray, we have a new message! */
                psock->rx_skb_head = NULL;
                KCM_STATS_INCR(psock->stats.rx_msgs);

try_queue:
                kcm = reserve_rx_kcm(psock, head);
                if (!kcm) {
                        /* Unable to reserve a KCM, message is held in psock. */
                        break;
                }

                if (kcm_queue_rcv_skb(&kcm->sk, head)) {
                        /* Should mean socket buffer full */
                        unreserve_rx_kcm(psock, false);
                        goto try_queue;
                }
        }

        if (cloned_orig)
                kfree_skb(orig_skb);

        KCM_STATS_ADD(psock->stats.rx_bytes, eaten);

        return eaten;
}

/* Called with lock held on lower socket */
static int psock_tcp_read_sock(struct kcm_psock *psock)
{
        read_descriptor_t desc;

        desc.arg.data = psock;
        desc.error = 0;
        desc.count = 1; /* give more than one skb per call */

        /* sk should be locked here, so okay to do tcp_read_sock */
        tcp_read_sock(psock->sk, &desc, kcm_tcp_recv);

        unreserve_rx_kcm(psock, true);

        return desc.error;
}

/* Lower sock lock held */
static void psock_tcp_data_ready(struct sock *sk)
{
        struct kcm_psock *psock;

        read_lock_bh(&sk->sk_callback_lock);

        psock = (struct kcm_psock *)sk->sk_user_data;
        if (unlikely(!psock || psock->rx_stopped))
                goto out;

        if (psock->ready_rx_msg)
                goto out;

        if (psock_tcp_read_sock(psock) == -ENOMEM)
                queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);

out:
        read_unlock_bh(&sk->sk_callback_lock);
}

static void do_psock_rx_work(struct kcm_psock *psock)
{
        read_descriptor_t rd_desc;
        struct sock *csk = psock->sk;

        /* We need the read lock to synchronize with psock_tcp_data_ready. We
         * need the socket lock for calling tcp_read_sock.
         */
        lock_sock(csk);
        read_lock_bh(&csk->sk_callback_lock);

        if (unlikely(csk->sk_user_data != psock))
                goto out;

        if (unlikely(psock->rx_stopped))
                goto out;

        if (psock->ready_rx_msg)
                goto out;

        rd_desc.arg.data = psock;

        if (psock_tcp_read_sock(psock) == -ENOMEM)
                queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);

out:
        read_unlock_bh(&csk->sk_callback_lock);
        release_sock(csk);
}

static void psock_rx_work(struct work_struct *w)
{
        do_psock_rx_work(container_of(w, struct kcm_psock, rx_work));
}

static void psock_rx_delayed_work(struct work_struct *w)
{
        do_psock_rx_work(container_of(w, struct kcm_psock,
                                      rx_delayed_work.work));
}

static void psock_tcp_state_change(struct sock *sk)
{
        /* TCP only does a POLLIN for a half close. Do a POLLHUP here
         * since application will normally not poll with POLLIN
         * on the TCP sockets.
         */

        report_csk_error(sk, EPIPE);
}

static void psock_tcp_write_space(struct sock *sk)
{
        struct kcm_psock *psock;
        struct kcm_mux *mux;
        struct kcm_sock *kcm;

        read_lock_bh(&sk->sk_callback_lock);

        psock = (struct kcm_psock *)sk->sk_user_data;
        if (unlikely(!psock))
                goto out;

        mux = psock->mux;

        spin_lock_bh(&mux->lock);

        /* Check if the socket is reserved so someone is waiting for sending. */
        kcm = psock->tx_kcm;
        if (kcm)
                queue_work(kcm_wq, &kcm->tx_work);

        spin_unlock_bh(&mux->lock);
out:
        read_unlock_bh(&sk->sk_callback_lock);
}

static void unreserve_psock(struct kcm_sock *kcm);

/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;
        struct kcm_psock *psock;

        psock = kcm->tx_psock;

        smp_rmb(); /* Must read tx_psock before tx_wait */

        if (psock) {
                WARN_ON(kcm->tx_wait);
                if (unlikely(psock->tx_stopped))
                        unreserve_psock(kcm);
                else
                        return kcm->tx_psock;
        }

        spin_lock_bh(&mux->lock);

        /* Check again under lock to see if a psock was reserved for this
         * kcm via psock_now_avail.
         */
        psock = kcm->tx_psock;
        if (unlikely(psock)) {
                WARN_ON(kcm->tx_wait);
                spin_unlock_bh(&mux->lock);
                return kcm->tx_psock;
        }

        if (!list_empty(&mux->psocks_avail)) {
                psock = list_first_entry(&mux->psocks_avail,
                                         struct kcm_psock,
                                         psock_avail_list);
                list_del(&psock->psock_avail_list);
                if (kcm->tx_wait) {
                        list_del(&kcm->wait_psock_list);
                        kcm->tx_wait = false;
                }
                kcm->tx_psock = psock;
                psock->tx_kcm = kcm;
                KCM_STATS_INCR(psock->stats.reserved);
        } else if (!kcm->tx_wait) {
                list_add_tail(&kcm->wait_psock_list,
                              &mux->kcm_tx_waiters);
                kcm->tx_wait = true;
        }

        spin_unlock_bh(&mux->lock);

        return psock;
}

/* mux lock held */
static void psock_now_avail(struct kcm_psock *psock)
{
        struct kcm_mux *mux = psock->mux;
        struct kcm_sock *kcm;

        if (list_empty(&mux->kcm_tx_waiters)) {
                list_add_tail(&psock->psock_avail_list,
                              &mux->psocks_avail);
        } else {
                kcm = list_first_entry(&mux->kcm_tx_waiters,
                                       struct kcm_sock,
                                       wait_psock_list);
                list_del(&kcm->wait_psock_list);
                kcm->tx_wait = false;
                psock->tx_kcm = kcm;

                /* Commit before changing tx_psock since that is read in
                 * reserve_psock before queuing work.
                 */
                smp_mb();

                kcm->tx_psock = psock;
                KCM_STATS_INCR(psock->stats.reserved);
                queue_work(kcm_wq, &kcm->tx_work);
        }
}

/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
        struct kcm_psock *psock;
        struct kcm_mux *mux = kcm->mux;

        spin_lock_bh(&mux->lock);

        psock = kcm->tx_psock;

        if (WARN_ON(!psock)) {
                spin_unlock_bh(&mux->lock);
                return;
        }

        smp_rmb(); /* Read tx_psock before tx_wait */

        kcm_update_tx_mux_stats(mux, psock);

        WARN_ON(kcm->tx_wait);

        kcm->tx_psock = NULL;
        psock->tx_kcm = NULL;
        KCM_STATS_INCR(psock->stats.unreserved);

        if (unlikely(psock->tx_stopped)) {
                if (psock->done) {
                        /* Deferred free */
                        list_del(&psock->psock_list);
                        mux->psocks_cnt--;
                        sock_put(psock->sk);
                        fput(psock->sk->sk_socket->file);
                        kmem_cache_free(kcm_psockp, psock);
                }

                /* Don't put back on available list */

                spin_unlock_bh(&mux->lock);

                return;
        }

        psock_now_avail(psock);

        spin_unlock_bh(&mux->lock);
}

static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;

        spin_lock_bh(&mux->lock);
        KCM_STATS_INCR(mux->stats.tx_retries);
        spin_unlock_bh(&mux->lock);
}

/* Write any messages ready on the kcm socket.  Called with kcm sock lock
 * held.  Return bytes actually sent or error.
 */
static int kcm_write_msgs(struct kcm_sock *kcm)
{
        struct sock *sk = &kcm->sk;
        struct kcm_psock *psock;
        struct sk_buff *skb, *head;
        struct kcm_tx_msg *txm;
        unsigned short fragidx, frag_offset;
        unsigned int sent, total_sent = 0;
        int ret = 0;

        kcm->tx_wait_more = false;
        psock = kcm->tx_psock;
        if (unlikely(psock && psock->tx_stopped)) {
                /* A reserved psock was aborted asynchronously. Unreserve
                 * it and we'll retry the message.
                 */
                unreserve_psock(kcm);
                kcm_report_tx_retry(kcm);
                if (skb_queue_empty(&sk->sk_write_queue))
                        return 0;

                kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;

        } else if (skb_queue_empty(&sk->sk_write_queue)) {
                return 0;
        }

        head = skb_peek(&sk->sk_write_queue);
        txm = kcm_tx_msg(head);

        if (txm->sent) {
                /* Send of first skbuff in queue already in progress */
                if (WARN_ON(!psock)) {
                        ret = -EINVAL;
                        goto out;
                }
                sent = txm->sent;
                frag_offset = txm->frag_offset;
                fragidx = txm->fragidx;
                skb = txm->frag_skb;

                goto do_frag;
        }

try_again:
        psock = reserve_psock(kcm);
        if (!psock)
                goto out;

        do {
                skb = head;
                txm = kcm_tx_msg(head);
                sent = 0;

do_frag_list:
                if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
                        ret = -EINVAL;
                        goto out;
                }

                for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
                     fragidx++) {
                        skb_frag_t *frag;

                        frag_offset = 0;
do_frag:
                        frag = &skb_shinfo(skb)->frags[fragidx];
                        if (WARN_ON(!frag->size)) {
                                ret = -EINVAL;
                                goto out;
                        }

                        ret = kernel_sendpage(psock->sk->sk_socket,
                                              frag->page.p,
                                              frag->page_offset + frag_offset,
                                              frag->size - frag_offset,
                                              MSG_DONTWAIT);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
                                        /* Save state to try again when there's
                                         * write space on the socket
                                         */
                                        txm->sent = sent;
                                        txm->frag_offset = frag_offset;
                                        txm->fragidx = fragidx;
                                        txm->frag_skb = skb;

                                        ret = 0;
                                        goto out;
                                }

                                /* Hard failure in sending message, abort this
                                 * psock since it has lost framing
                                 * synchronization and retry sending the
                                 * message from the beginning.
                                 */
                                kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
                                                   true);
                                unreserve_psock(kcm);

                                txm->sent = 0;
                                kcm_report_tx_retry(kcm);
                                ret = 0;

                                goto try_again;
                        }

                        sent += ret;
                        frag_offset += ret;
                        KCM_STATS_ADD(psock->stats.tx_bytes, ret);
                        if (frag_offset < frag->size) {
                                /* Not finished with this frag */
                                goto do_frag;
                        }
                }

                if (skb == head) {
                        if (skb_has_frag_list(skb)) {
                                skb = skb_shinfo(skb)->frag_list;
                                goto do_frag_list;
                        }
                } else if (skb->next) {
                        skb = skb->next;
                        goto do_frag_list;
                }

                /* Successfully sent the whole packet, account for it. */
                skb_dequeue(&sk->sk_write_queue);
                kfree_skb(head);
                sk->sk_wmem_queued -= sent;
                total_sent += sent;
                KCM_STATS_INCR(psock->stats.tx_msgs);
        } while ((head = skb_peek(&sk->sk_write_queue)));
out:
        if (!head) {
                /* Done with all queued messages. */
                WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
                unreserve_psock(kcm);
        }

        /* Check if write space is available */
        sk->sk_write_space(sk);

        return total_sent ? : ret;
}

static void kcm_tx_work(struct work_struct *w)
{
        struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
        struct sock *sk = &kcm->sk;
        int err;

        lock_sock(sk);

        /* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
         * aborts
         */
        err = kcm_write_msgs(kcm);
        if (err < 0) {
                /* Hard failure in write, report error on KCM socket */
                pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
                report_csk_error(&kcm->sk, -err);
                goto out;
        }

        /* Primarily for SOCK_SEQPACKET sockets */
        if (likely(sk->sk_socket) &&
            test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                sk->sk_write_space(sk);
        }

out:
        release_sock(sk);
}

static void kcm_push(struct kcm_sock *kcm)
{
        if (kcm->tx_wait_more)
                kcm_write_msgs(kcm);
}

static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        struct sk_buff *skb = NULL, *head = NULL;
        size_t copy, copied = 0;
        long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        int eor = (sock->type == SOCK_DGRAM) ?
                  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
        int err = -EPIPE;

        lock_sock(sk);

        /* Per tcp_sendmsg this should be in poll */
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        if (sk->sk_err)
                goto out_error;

        if (kcm->seq_skb) {
                /* Previously opened message */
                head = kcm->seq_skb;
                skb = kcm_tx_msg(head)->last_skb;
                goto start;
        }

        /* Call the sk_stream functions to manage the sndbuf mem. */
        if (!sk_stream_memory_free(sk)) {
                kcm_push(kcm);
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                err = sk_stream_wait_memory(sk, &timeo);
                if (err)
                        goto out_error;
        }

        /* New message, alloc head skb */
        head = alloc_skb(0, sk->sk_allocation);
        while (!head) {
                kcm_push(kcm);
                err = sk_stream_wait_memory(sk, &timeo);
                if (err)
                        goto out_error;

                head = alloc_skb(0, sk->sk_allocation);
        }

        skb = head;

        /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
         * csum_and_copy_from_iter from skb_do_copy_data_nocache.
         */
        skb->ip_summed = CHECKSUM_UNNECESSARY;

start:
        while (msg_data_left(msg)) {
                bool merge = true;
                int i = skb_shinfo(skb)->nr_frags;
                struct page_frag *pfrag = sk_page_frag(sk);

                if (!sk_page_frag_refill(sk, pfrag))
                        goto wait_for_memory;

                if (!skb_can_coalesce(skb, i, pfrag->page,
                                      pfrag->offset)) {
                        if (i == MAX_SKB_FRAGS) {
                                struct sk_buff *tskb;

                                tskb = alloc_skb(0, sk->sk_allocation);
                                if (!tskb)
                                        goto wait_for_memory;

                                if (head == skb)
                                        skb_shinfo(head)->frag_list = tskb;
                                else
                                        skb->next = tskb;

                                skb = tskb;
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                                continue;
                        }
                        merge = false;
                }

                copy = min_t(int, msg_data_left(msg),
                             pfrag->size - pfrag->offset);

                if (!sk_wmem_schedule(sk, copy))
                        goto wait_for_memory;

                err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
                                               pfrag->page,
                                               pfrag->offset,
                                               copy);
                if (err)
                        goto out_error;

                /* Update the skb. */
                if (merge) {
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                } else {
                        skb_fill_page_desc(skb, i, pfrag->page,
                                           pfrag->offset, copy);
                        get_page(pfrag->page);
                }

                pfrag->offset += copy;
                copied += copy;
                if (head != skb) {
                        head->len += copy;
                        head->data_len += copy;
                }

                continue;

wait_for_memory:
                kcm_push(kcm);
                err = sk_stream_wait_memory(sk, &timeo);
                if (err)
                        goto out_error;
        }

        if (eor) {
                bool not_busy = skb_queue_empty(&sk->sk_write_queue);

                /* Message complete, queue it on send buffer */
                __skb_queue_tail(&sk->sk_write_queue, head);
                kcm->seq_skb = NULL;
                KCM_STATS_INCR(kcm->stats.tx_msgs);

                if (msg->msg_flags & MSG_BATCH) {
                        kcm->tx_wait_more = true;
                } else if (kcm->tx_wait_more || not_busy) {
                        err = kcm_write_msgs(kcm);
                        if (err < 0) {
                                /* We got a hard error in write_msgs but have
                                 * already queued this message. Report an error
                                 * in the socket, but don't affect return value
                                 * from sendmsg
                                 */
                                pr_warn("KCM: Hard failure on kcm_write_msgs\n");
                                report_csk_error(&kcm->sk, -err);
                        }
                }
        } else {
                /* Message not complete, save state */
partial_message:
                kcm->seq_skb = head;
                kcm_tx_msg(head)->last_skb = skb;
        }

        KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

        release_sock(sk);
        return copied;

out_error:
        kcm_push(kcm);

        if (copied && sock->type == SOCK_SEQPACKET) {
                /* Wrote some bytes before encountering an
                 * error, return partial success.
                 */
                goto partial_message;
        }

        if (head != kcm->seq_skb)
                kfree_skb(head);

        err = sk_stream_error(sk, msg->msg_flags, err);

        /* make sure we wake any epoll edge trigger waiter */
        if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
                sk->sk_write_space(sk);

        release_sock(sk);
        return err;
}

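/* A note on the batching flags handled above: MSG_BATCH queues the completed
 * message but defers transmission (tx_wait_more), and the next sendmsg
 * without MSG_BATCH flushes everything queued so far. On SOCK_SEQPACKET a
 * message is completed by MSG_EOR, on SOCK_DGRAM by the absence of MSG_MORE.
 * An illustrative userspace sketch (not part of this file), where the third
 * send flushes all three messages:
 *
 *	send(kcm_fd, msg1, len1, MSG_BATCH);
 *	send(kcm_fd, msg2, len2, MSG_BATCH);
 *	send(kcm_fd, msg3, len3, 0);
 */
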
static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
                                     long timeo, int *err)
{
        struct sk_buff *skb;

        while (!(skb = skb_peek(&sk->sk_receive_queue))) {
                if (sk->sk_err) {
                        *err = sock_error(sk);
                        return NULL;
                }

                if (sock_flag(sk, SOCK_DONE))
                        return NULL;

                if ((flags & MSG_DONTWAIT) || !timeo) {
                        *err = -EAGAIN;
                        return NULL;
                }

                sk_wait_data(sk, &timeo, NULL);

                /* Handle signals */
                if (signal_pending(current)) {
                        *err = sock_intr_errno(timeo);
                        return NULL;
                }
        }

        return skb;
}

static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
                       size_t len, int flags)
{
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        int err = 0;
        long timeo;
        struct kcm_rx_msg *rxm;
        int copied = 0;
        struct sk_buff *skb;

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        lock_sock(sk);

        skb = kcm_wait_data(sk, flags, timeo, &err);
        if (!skb)
                goto out;

        /* Okay, have a message on the receive queue */

        rxm = kcm_rx_msg(skb);

        if (len > rxm->full_len)
                len = rxm->full_len;

        err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
        if (err < 0)
                goto out;

        copied = len;
        if (likely(!(flags & MSG_PEEK))) {
                KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
                if (copied < rxm->full_len) {
                        if (sock->type == SOCK_DGRAM) {
                                /* Truncated message */
                                msg->msg_flags |= MSG_TRUNC;
                                goto msg_finished;
                        }
                        rxm->offset += copied;
                        rxm->full_len -= copied;
                } else {
msg_finished:
                        /* Finished with message */
                        msg->msg_flags |= MSG_EOR;
                        KCM_STATS_INCR(kcm->stats.rx_msgs);
                        skb_unlink(skb, &sk->sk_receive_queue);
                        kfree_skb(skb);
                }
        }

out:
        release_sock(sk);

        return copied ? : err;
}

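/* A note on the receive semantics implemented above: each recvmsg() consumes
 * at most one parsed message. On SOCK_SEQPACKET a short read leaves the
 * remainder of the message for the next call; on SOCK_DGRAM the remainder is
 * dropped and MSG_TRUNC is set. MSG_EOR is set on the read that finishes a
 * message. An illustrative userspace sketch (not part of this file):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n = recvmsg(kcm_fd, &mh, 0);
 *	bool done = n >= 0 && (mh.msg_flags & MSG_EOR);
 */
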
/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;

        if (kcm->rx_disabled)
                return;

        spin_lock_bh(&mux->rx_lock);

        kcm->rx_disabled = 1;

        /* If a psock is reserved we'll do cleanup in unreserve */
        if (!kcm->rx_psock) {
                if (kcm->rx_wait) {
                        list_del(&kcm->wait_rx_list);
                        kcm->rx_wait = false;
                }

                requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
        }

        spin_unlock_bh(&mux->rx_lock);
}

/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;

        if (!kcm->rx_disabled)
                return;

        spin_lock_bh(&mux->rx_lock);

        kcm->rx_disabled = 0;
        kcm_rcv_ready(kcm);

        spin_unlock_bh(&mux->rx_lock);
}

static int kcm_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        int val, valbool;
        int err = 0;

        if (level != SOL_KCM)
                return -ENOPROTOOPT;

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        valbool = val ? 1 : 0;

        switch (optname) {
        case KCM_RECV_DISABLE:
                lock_sock(&kcm->sk);
                if (valbool)
                        kcm_recv_disable(kcm);
                else
                        kcm_recv_enable(kcm);
                release_sock(&kcm->sk);
                break;
        default:
                err = -ENOPROTOOPT;
        }

        return err;
}

static int kcm_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        int val, len;

        if (level != SOL_KCM)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;

        if (len < 0)
                return -EINVAL;

        len = min_t(unsigned int, len, sizeof(int));

        switch (optname) {
        case KCM_RECV_DISABLE:
                val = kcm->rx_disabled;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;
        return 0;
}

static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
        struct kcm_sock *tkcm;
        struct list_head *head;
        int index = 0;

        /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
         * we set sk_state, otherwise epoll_wait always returns right away with
         * POLLHUP
         */
        kcm->sk.sk_state = TCP_ESTABLISHED;

        /* Add to mux's kcm sockets list */
        kcm->mux = mux;
        spin_lock_bh(&mux->lock);

        head = &mux->kcm_socks;
        list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
                if (tkcm->index != index)
                        break;
                head = &tkcm->kcm_sock_list;
                index++;
        }

        list_add(&kcm->kcm_sock_list, head);
        kcm->index = index;

        mux->kcm_socks_cnt++;
        spin_unlock_bh(&mux->lock);

        INIT_WORK(&kcm->tx_work, kcm_tx_work);

        spin_lock_bh(&mux->rx_lock);
        kcm_rcv_ready(kcm);
        spin_unlock_bh(&mux->rx_lock);
}

static int kcm_attach(struct socket *sock, struct socket *csock,
                      struct bpf_prog *prog)
{
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        struct kcm_mux *mux = kcm->mux;
        struct sock *csk;
        struct kcm_psock *psock = NULL, *tpsock;
        struct list_head *head;
        int index = 0;

        if (csock->ops->family != PF_INET &&
            csock->ops->family != PF_INET6)
                return -EINVAL;

        csk = csock->sk;
        if (!csk)
                return -EINVAL;

        /* Only support TCP for now */
        if (csk->sk_protocol != IPPROTO_TCP)
                return -EINVAL;

        psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
        if (!psock)
                return -ENOMEM;

        psock->mux = mux;
        psock->sk = csk;
        psock->bpf_prog = prog;
        INIT_WORK(&psock->rx_work, psock_rx_work);
        INIT_DELAYED_WORK(&psock->rx_delayed_work, psock_rx_delayed_work);

        sock_hold(csk);

        write_lock_bh(&csk->sk_callback_lock);
        psock->save_data_ready = csk->sk_data_ready;
        psock->save_write_space = csk->sk_write_space;
        psock->save_state_change = csk->sk_state_change;
        csk->sk_user_data = psock;
        csk->sk_data_ready = psock_tcp_data_ready;
        csk->sk_write_space = psock_tcp_write_space;
        csk->sk_state_change = psock_tcp_state_change;
        write_unlock_bh(&csk->sk_callback_lock);

        /* Finished initialization, now add the psock to the MUX. */
        spin_lock_bh(&mux->lock);
        head = &mux->psocks;
        list_for_each_entry(tpsock, &mux->psocks, psock_list) {
                if (tpsock->index != index)
                        break;
                head = &tpsock->psock_list;
                index++;
        }

        list_add(&psock->psock_list, head);
        psock->index = index;

        KCM_STATS_INCR(mux->stats.psock_attach);
        mux->psocks_cnt++;
        psock_now_avail(psock);
        spin_unlock_bh(&mux->lock);

        /* Schedule RX work in case there are already bytes queued */
        queue_work(kcm_wq, &psock->rx_work);

        return 0;
}

static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
        struct socket *csock;
        struct bpf_prog *prog;
        int err;

        csock = sockfd_lookup(info->fd, &err);
        if (!csock)
                return -ENOENT;

        prog = bpf_prog_get(info->bpf_fd);
        if (IS_ERR(prog)) {
                err = PTR_ERR(prog);
                goto out;
        }

        if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
                bpf_prog_put(prog);
                err = -EINVAL;
                goto out;
        }

        err = kcm_attach(sock, csock, prog);
        if (err) {
                bpf_prog_put(prog);
                goto out;
        }

        /* Keep reference on file also */

        return 0;
out:
        fput(csock->file);
        return err;
}

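/* An illustrative userspace sketch of the attach path above (not part of
 * this file), assuming a KCM socket created with socket(AF_KCM, SOCK_DGRAM,
 * KCMPROTO_CONNECTED), a connected TCP socket tcp_fd, and bpf_fd referring
 * to a loaded BPF_PROG_TYPE_SOCKET_FILTER program that returns message
 * lengths:
 *
 *	struct kcm_attach info = {
 *		.fd = tcp_fd,
 *		.bpf_fd = bpf_fd,
 *	};
 *
 *	if (ioctl(kcm_fd, SIOCKCMATTACH, &info))
 *		perror("SIOCKCMATTACH");
 */
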
static void kcm_unattach(struct kcm_psock *psock)
{
        struct sock *csk = psock->sk;
        struct kcm_mux *mux = psock->mux;

        /* Stop getting callbacks from TCP socket. After this there should
         * be no way to reserve a kcm for this psock.
         */
        write_lock_bh(&csk->sk_callback_lock);
        csk->sk_user_data = NULL;
        csk->sk_data_ready = psock->save_data_ready;
        csk->sk_write_space = psock->save_write_space;
        csk->sk_state_change = psock->save_state_change;
        psock->rx_stopped = 1;

        if (WARN_ON(psock->rx_kcm)) {
                write_unlock_bh(&csk->sk_callback_lock);
                return;
        }

        spin_lock_bh(&mux->rx_lock);

        /* Stop receiver activities. After this point psock should not be
         * able to get onto ready list either through callbacks or work.
         */
        if (psock->ready_rx_msg) {
                list_del(&psock->psock_ready_list);
                kfree_skb(psock->ready_rx_msg);
                psock->ready_rx_msg = NULL;
                KCM_STATS_INCR(mux->stats.rx_ready_drops);
        }

        spin_unlock_bh(&mux->rx_lock);

        write_unlock_bh(&csk->sk_callback_lock);

        cancel_work_sync(&psock->rx_work);
        cancel_delayed_work_sync(&psock->rx_delayed_work);

        bpf_prog_put(psock->bpf_prog);

        kfree_skb(psock->rx_skb_head);
        psock->rx_skb_head = NULL;

        spin_lock_bh(&mux->lock);

        aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);

        KCM_STATS_INCR(mux->stats.psock_unattach);

        if (psock->tx_kcm) {
                /* psock was reserved.  Just mark it finished and we will clean
                 * up in the kcm paths, we need kcm lock which can not be
                 * acquired here.
                 */
                KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
                spin_unlock_bh(&mux->lock);

                /* We are unattaching a socket that is reserved. Abort the
                 * socket since we may be out of sync in sending on it. We need
                 * to do this without the mux lock.
                 */
                kcm_abort_tx_psock(psock, EPIPE, false);

                spin_lock_bh(&mux->lock);
                if (!psock->tx_kcm) {
                        /* psock now unreserved in the window the mux was unlocked */
1573                         goto no_reserved;
1574                 }
1575                 psock->done = 1;
1576
1577                 /* Commit done before queuing work to process it */
1578                 smp_mb();
1579
1580                 /* Queue tx work to make sure psock->done is handled */
1581                 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1582                 spin_unlock_bh(&mux->lock);
1583         } else {
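                /* no_reserved is reached directly when psock->tx_kcm was
                 * NULL, or via the goto above when the psock was
                 * unreserved while the mux lock was dropped.
                 */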
1584 no_reserved:
1585                 if (!psock->tx_stopped)
1586                         list_del(&psock->psock_avail_list);
1587                 list_del(&psock->psock_list);
1588                 mux->psocks_cnt--;
1589                 spin_unlock_bh(&mux->lock);
1590
1591                 sock_put(csk);
1592                 fput(csk->sk_socket->file);
1593                 kmem_cache_free(kcm_psockp, psock);
1594         }
1595 }
1596
1597 static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1598 {
1599         struct kcm_sock *kcm = kcm_sk(sock->sk);
1600         struct kcm_mux *mux = kcm->mux;
1601         struct kcm_psock *psock;
1602         struct socket *csock;
1603         struct sock *csk;
1604         int err;
1605
1606         csock = sockfd_lookup(info->fd, &err);
1607         if (!csock)
1608                 return -ENOENT;
1609
1610         csk = csock->sk;
1611         if (!csk) {
1612                 err = -EINVAL;
1613                 goto out;
1614         }
1615
1616         err = -ENOENT;
1617
1618         spin_lock_bh(&mux->lock);
1619
1620         list_for_each_entry(psock, &mux->psocks, psock_list) {
1621                 if (psock->sk != csk)
1622                         continue;
1623
1624                 /* Found the matching psock */
1625
1626                 if (psock->unattaching || WARN_ON(psock->done)) {
1627                         err = -EALREADY;
1628                         break;
1629                 }
1630
1631                 psock->unattaching = 1;
1632
1633                 spin_unlock_bh(&mux->lock);
1634
1635                 kcm_unattach(psock);
1636
1637                 err = 0;
1638                 goto out;
1639         }
1640
1641         spin_unlock_bh(&mux->lock);
1642
1643 out:
1644         fput(csock->file);
1645         return err;
1646 }
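
/* Illustrative userspace sketch (not part of this module): detaching a
 * TCP socket with SIOCKCMUNATTACH.  tcp_fd is hypothetical and refers
 * to the same descriptor that was passed to SIOCKCMATTACH.
 *
 *	struct kcm_unattach info = { .fd = tcp_fd };
 *
 *	if (ioctl(kcm_fd, SIOCKCMUNATTACH, &info) < 0)
 *		perror("SIOCKCMUNATTACH");
 */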
1647
1648 static struct proto kcm_proto = {
1649         .name   = "KCM",
1650         .owner  = THIS_MODULE,
1651         .obj_size = sizeof(struct kcm_sock),
1652 };
1653
1654 /* Clone a kcm socket. */
1655 static int kcm_clone(struct socket *osock, struct kcm_clone *info,
1656                      struct socket **newsockp)
1657 {
1658         struct socket *newsock;
1659         struct sock *newsk;
1660         struct file *newfile;
1661         int err, newfd;
1662
1663         err = -ENFILE;
1664         newsock = sock_alloc();
1665         if (!newsock)
1666                 goto out;
1667
1668         newsock->type = osock->type;
1669         newsock->ops = osock->ops;
1670
1671         __module_get(newsock->ops->owner);
1672
1673         newfd = get_unused_fd_flags(0);
1674         if (unlikely(newfd < 0)) {
1675                 err = newfd;
1676                 goto out_fd_fail;
1677         }
1678
1679         newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1680         if (unlikely(IS_ERR(newfile))) {
1681                 err = PTR_ERR(newfile);
1682                 goto out_sock_alloc_fail;
1683         }
1684
1685         newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1686                          &kcm_proto, true);
1687         if (!newsk) {
1688                 err = -ENOMEM;
1689                 goto out_sk_alloc_fail;
1690         }
1691
1692         sock_init_data(newsock, newsk);
1693         init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1694
1695         fd_install(newfd, newfile);
1696         *newsockp = newsock;
1697         info->fd = newfd;
1698
1699         return 0;
1700
1701 out_sk_alloc_fail:
1702         fput(newfile);
1703 out_sock_alloc_fail:
1704         put_unused_fd(newfd);
1705 out_fd_fail:
1706         sock_release(newsock);
1707 out:
1708         return err;
1709 }
1710
1711 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1712 {
1713         int err;
1714
1715         switch (cmd) {
1716         case SIOCKCMATTACH: {
1717                 struct kcm_attach info;
1718
1719                 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1720                         return -EFAULT;
1721
1722                 err = kcm_attach_ioctl(sock, &info);
1723
1724                 break;
1725         }
1726         case SIOCKCMUNATTACH: {
1727                 struct kcm_unattach info;
1728
1729                 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1730                         return -EFAULT;
1731
1732                 err = kcm_unattach_ioctl(sock, &info);
1733
1734                 break;
1735         }
1736         case SIOCKCMCLONE: {
1737                 struct kcm_clone info;
1738                 struct socket *newsock = NULL;
1739
1740                 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1741                         return -EFAULT;
1742
1743                 err = kcm_clone(sock, &info, &newsock);
1744
1745                 if (!err) {
1746                         if (copy_to_user((void __user *)arg, &info,
1747                                          sizeof(info))) {
1748                                 err = -EFAULT;
1749                                 /* the installed fd owns newsock; releasing it here would double free */
1750                         }
1751                 }
1752
1753                 break;
1754         }
1755         default:
1756                 err = -ENOIOCTLCMD;
1757                 break;
1758         }
1759
1760         return err;
1761 }
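
/* Illustrative userspace sketch (not part of this module): cloning a
 * KCM socket with SIOCKCMCLONE.  On success the descriptor returned in
 * info.fd is a new KCM socket on the same MUX as kcm_fd (hypothetical).
 *
 *	struct kcm_clone info = { 0 };
 *
 *	if (ioctl(kcm_fd, SIOCKCMCLONE, &info) == 0)
 *		new_kcm_fd = info.fd;	// shares kcm_fd's MUX
 */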
1762
1763 static void free_mux(struct rcu_head *rcu)
1764 {
1765         struct kcm_mux *mux = container_of(rcu,
1766             struct kcm_mux, rcu);
1767
1768         kmem_cache_free(kcm_muxp, mux);
1769 }
1770
1771 static void release_mux(struct kcm_mux *mux)
1772 {
1773         struct kcm_net *knet = mux->knet;
1774         struct kcm_psock *psock, *tmp_psock;
1775
1776         /* Release psocks */
1777         list_for_each_entry_safe(psock, tmp_psock,
1778                                  &mux->psocks, psock_list) {
1779                 if (!WARN_ON(psock->unattaching))
1780                         kcm_unattach(psock);
1781         }
1782
1783         if (WARN_ON(mux->psocks_cnt))
1784                 return;
1785
1786         __skb_queue_purge(&mux->rx_hold_queue);
1787
1788         mutex_lock(&knet->mutex);
1789         aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1790         aggregate_psock_stats(&mux->aggregate_psock_stats,
1791                               &knet->aggregate_psock_stats);
1792         list_del_rcu(&mux->kcm_mux_list);
1793         knet->count--;
1794         mutex_unlock(&knet->mutex);
1795
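        /* Readers such as the kcm proc interface walk kcm_mux_list under
         * rcu_read_lock(), so the mux must not be freed until a grace
         * period has elapsed.
         */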
1796         call_rcu(&mux->rcu, free_mux);
1797 }
1798
1799 static void kcm_done(struct kcm_sock *kcm)
1800 {
1801         struct kcm_mux *mux = kcm->mux;
1802         struct sock *sk = &kcm->sk;
1803         int socks_cnt;
1804
1805         spin_lock_bh(&mux->rx_lock);
1806         if (kcm->rx_psock) {
1807                 /* Cleanup in unreserve_rx_kcm */
1808                 WARN_ON(kcm->done);
1809                 kcm->rx_disabled = 1;
1810                 kcm->done = 1;
1811                 spin_unlock_bh(&mux->rx_lock);
1812                 return;
1813         }
1814
1815         if (kcm->rx_wait) {
1816                 list_del(&kcm->wait_rx_list);
1817                 kcm->rx_wait = false;
1818         }
1819         /* Move any pending receive messages to other kcm sockets */
1820         requeue_rx_msgs(mux, &sk->sk_receive_queue);
1821
1822         spin_unlock_bh(&mux->rx_lock);
1823
1824         if (WARN_ON(sk_rmem_alloc_get(sk)))
1825                 return;
1826
1827         /* Detach from MUX */
1828         spin_lock_bh(&mux->lock);
1829
1830         list_del(&kcm->kcm_sock_list);
1831         mux->kcm_socks_cnt--;
1832         socks_cnt = mux->kcm_socks_cnt;
1833
1834         spin_unlock_bh(&mux->lock);
1835
1836         if (!socks_cnt) {
1837                 /* We are done with the mux now. */
1838                 release_mux(mux);
1839         }
1840
1841         WARN_ON(kcm->rx_wait);
1842
1843         sock_put(&kcm->sk);
1844 }
1845
1846 /* Release a KCM socket; called from the socket layer on close().
1847  * If this is the last KCM socket on the MUX, destroy the MUX.
1848  */
1849 static int kcm_release(struct socket *sock)
1850 {
1851         struct sock *sk = sock->sk;
1852         struct kcm_sock *kcm;
1853         struct kcm_mux *mux;
1854         struct kcm_psock *psock;
1855
1856         if (!sk)
1857                 return 0;
1858
1859         kcm = kcm_sk(sk);
1860         mux = kcm->mux;
1861
1862         sock_orphan(sk);
1863         kfree_skb(kcm->seq_skb);
1864
1865         lock_sock(sk);
1866         /* Purge queue under lock to avoid race condition with tx_work trying
1867          * to act when queue is nonempty. If tx_work runs after this point
1868          * it will just return.
1869          */
1870         __skb_queue_purge(&sk->sk_write_queue);
1871         release_sock(sk);
1872
1873         spin_lock_bh(&mux->lock);
1874         if (kcm->tx_wait) {
1875                 /* Take off the tx_wait list; after this point there should be no way
1876                  * that a psock will be assigned to this kcm.
1877                  */
1878                 list_del(&kcm->wait_psock_list);
1879                 kcm->tx_wait = false;
1880         }
1881         spin_unlock_bh(&mux->lock);
1882
1883         /* Cancel work. After this point there should be no outside references
1884          * to the kcm socket.
1885          */
1886         cancel_work_sync(&kcm->tx_work);
1887
1888         lock_sock(sk);
1889         psock = kcm->tx_psock;
1890         if (psock) {
1891                 /* A psock was reserved, so we need to kill it since it
1892                  * may already have some bytes queued from a message. We
1893                  * need to do this after removing kcm from tx_wait list.
1894                  */
1895                 kcm_abort_tx_psock(psock, EPIPE, false);
1896                 unreserve_psock(kcm);
1897         }
1898         release_sock(sk);
1899
1900         WARN_ON(kcm->tx_wait);
1901         WARN_ON(kcm->tx_psock);
1902
1903         sock->sk = NULL;
1904
1905         kcm_done(kcm);
1906
1907         return 0;
1908 }
1909
1910 static const struct proto_ops kcm_ops = {
1911         .family =       PF_KCM,
1912         .owner =        THIS_MODULE,
1913         .release =      kcm_release,
1914         .bind =         sock_no_bind,
1915         .connect =      sock_no_connect,
1916         .socketpair =   sock_no_socketpair,
1917         .accept =       sock_no_accept,
1918         .getname =      sock_no_getname,
1919         .poll =         datagram_poll,
1920         .ioctl =        kcm_ioctl,
1921         .listen =       sock_no_listen,
1922         .shutdown =     sock_no_shutdown,
1923         .setsockopt =   kcm_setsockopt,
1924         .getsockopt =   kcm_getsockopt,
1925         .sendmsg =      kcm_sendmsg,
1926         .recvmsg =      kcm_recvmsg,
1927         .mmap =         sock_no_mmap,
1928         .sendpage =     sock_no_sendpage,
1929 };
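
/* Illustrative userspace sketch (not part of this module): KCM I/O is
 * message oriented.  Each send() queues exactly one message and each
 * recv() returns exactly one message, as delimited by the attached
 * framing program.  kcm_fd, msg and buf are hypothetical.
 *
 *	send(kcm_fd, msg, msg_len, 0);		// one complete message
 *	n = recv(kcm_fd, buf, sizeof(buf), 0);	// one complete message
 */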
1930
1931 /* Create a KCM socket; this allocates a new MUX for the socket */
1932 static int kcm_create(struct net *net, struct socket *sock,
1933                       int protocol, int kern)
1934 {
1935         struct kcm_net *knet = net_generic(net, kcm_net_id);
1936         struct sock *sk;
1937         struct kcm_mux *mux;
1938
1939         switch (sock->type) {
1940         case SOCK_DGRAM:
1941         case SOCK_SEQPACKET:
1942                 sock->ops = &kcm_ops;
1943                 break;
1944         default:
1945                 return -ESOCKTNOSUPPORT;
1946         }
1947
1948         if (protocol != KCMPROTO_CONNECTED)
1949                 return -EPROTONOSUPPORT;
1950
1951         sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1952         if (!sk)
1953                 return -ENOMEM;
1954
1955         /* Allocate a kcm mux, shared between KCM sockets */
1956         mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1957         if (!mux) {
1958                 sk_free(sk);
1959                 return -ENOMEM;
1960         }
1961
1962         spin_lock_init(&mux->lock);
1963         spin_lock_init(&mux->rx_lock);
1964         INIT_LIST_HEAD(&mux->kcm_socks);
1965         INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1966         INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1967
1968         INIT_LIST_HEAD(&mux->psocks);
1969         INIT_LIST_HEAD(&mux->psocks_ready);
1970         INIT_LIST_HEAD(&mux->psocks_avail);
1971
1972         mux->knet = knet;
1973
1974         /* Add new MUX to list */
1975         mutex_lock(&knet->mutex);
1976         list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1977         knet->count++;
1978         mutex_unlock(&knet->mutex);
1979
1980         skb_queue_head_init(&mux->rx_hold_queue);
1981
1982         /* Init KCM socket */
1983         sock_init_data(sock, sk);
1984         init_kcm_sock(kcm_sk(sk), mux);
1985
1986         return 0;
1987 }
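
/* Illustrative userspace sketch (not part of this module): creating
 * the first KCM socket of a MUX.  AF_KCM may need to be defined by
 * hand on C libraries that predate it.
 *
 *	#include <sys/socket.h>
 *	#include <linux/kcm.h>
 *
 *	int kcm_fd = socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);
 */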
1988
1989 static struct net_proto_family kcm_family_ops = {
1990         .family = PF_KCM,
1991         .create = kcm_create,
1992         .owner  = THIS_MODULE,
1993 };
1994
1995 static __net_init int kcm_init_net(struct net *net)
1996 {
1997         struct kcm_net *knet = net_generic(net, kcm_net_id);
1998
1999         INIT_LIST_HEAD_RCU(&knet->mux_list);
2000         mutex_init(&knet->mutex);
2001
2002         return 0;
2003 }
2004
2005 static __net_exit void kcm_exit_net(struct net *net)
2006 {
2007         struct kcm_net *knet = net_generic(net, kcm_net_id);
2008
2009         /* All KCM sockets should be closed at this point, which should mean
2010          * that all multiplexors and psocks have been destroyed.
2011          */
2012         WARN_ON(!list_empty(&knet->mux_list));
2013 }
2014
2015 static struct pernet_operations kcm_net_ops = {
2016         .init = kcm_init_net,
2017         .exit = kcm_exit_net,
2018         .id   = &kcm_net_id,
2019         .size = sizeof(struct kcm_net),
2020 };
2021
2022 static int __init kcm_init(void)
2023 {
2024         int err = -ENOMEM;
2025
2026         kcm_muxp = kmem_cache_create("kcm_mux_cache",
2027                                      sizeof(struct kcm_mux), 0,
2028                                      SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2029         if (!kcm_muxp)
2030                 goto fail;
2031
2032         kcm_psockp = kmem_cache_create("kcm_psock_cache",
2033                                        sizeof(struct kcm_psock), 0,
2034                                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2035         if (!kcm_psockp)
2036                 goto fail;
2037
2038         kcm_wq = create_singlethread_workqueue("kkcmd");
2039         if (!kcm_wq)
2040                 goto fail;
2041
2042         err = proto_register(&kcm_proto, 1);
2043         if (err)
2044                 goto fail;
2045
2046         err = sock_register(&kcm_family_ops);
2047         if (err)
2048                 goto sock_register_fail;
2049
2050         err = register_pernet_device(&kcm_net_ops);
2051         if (err)
2052                 goto net_ops_fail;
2053
2054         err = kcm_proc_init();
2055         if (err)
2056                 goto proc_init_fail;
2057
2058         return 0;
2059
2060 proc_init_fail:
2061         unregister_pernet_device(&kcm_net_ops);
2062
2063 net_ops_fail:
2064         sock_unregister(PF_KCM);
2065
2066 sock_register_fail:
2067         proto_unregister(&kcm_proto);
2068
2069 fail:
2070         kmem_cache_destroy(kcm_muxp);
2071         kmem_cache_destroy(kcm_psockp);
2072
2073         if (kcm_wq)
2074                 destroy_workqueue(kcm_wq);
2075
2076         return err;
2077 }
2078
2079 static void __exit kcm_exit(void)
2080 {
2081         kcm_proc_exit();
2082         unregister_pernet_device(&kcm_net_ops);
2083         sock_unregister(PF_KCM);
2084         proto_unregister(&kcm_proto);
2085         destroy_workqueue(kcm_wq);
2086
2087         kmem_cache_destroy(kcm_muxp);
2088         kmem_cache_destroy(kcm_psockp);
2089 }
2090
2091 module_init(kcm_init);
2092 module_exit(kcm_exit);
2093
2094 MODULE_LICENSE("GPL");
2095 MODULE_ALIAS_NETPROTO(PF_KCM);
2096