/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 *              Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *              Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE     (sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)     ((skb)->cb)             /* iucv message tag */
#define CB_TAG_LEN      (sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)  ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN   (TRGCLS_SIZE)
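/* resulting skb->cb layout: bytes [0, CB_TAG_LEN) hold the iucv message tag,
 * bytes [CB_TAG_LEN, CB_TAG_LEN + CB_TRGCLS_LEN) hold the target class */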

#define __iucv_sock_wait(sk, condition, timeo, ret)                     \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
        long __timeo = timeo;                                           \
        ret = 0;                                                        \
        prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
        while (!(condition)) {                                          \
                if (!__timeo) {                                         \
                        ret = -EAGAIN;                                  \
                        break;                                          \
                }                                                       \
                if (signal_pending(current)) {                          \
                        ret = sock_intr_errno(__timeo);                 \
                        break;                                          \
                }                                                       \
                release_sock(sk);                                       \
                __timeo = schedule_timeout(__timeo);                    \
                lock_sock(sk);                                          \
                ret = sock_error(sk);                                   \
                if (ret)                                                \
                        break;                                          \
        }                                                               \
        finish_wait(sk_sleep(sk), &__wait);                             \
} while (0)

#define iucv_sock_wait(sk, condition, timeo)                            \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __iucv_sock_wait(sk, condition, timeo, __ret);          \
        __ret;                                                          \
})
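/* typical use, as in iucv_sock_close() below:
 *      iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CLOSED, 0), timeo);
 * the macro sleeps with the socket lock released and re-acquires it before
 * re-evaluating the condition */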

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
        struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                   struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Callback functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
                                 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending     = iucv_callback_connreq,
        .path_complete    = iucv_callback_connack,
        .path_severed     = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone,
        .path_quiesced    = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
       memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
       memcpy(&dst[8], src, 8);
}
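/* high_nmcpy() and low_nmcpy() assemble the 16-byte IUCV user data from two
 * 8-byte names: bytes 0..7 and bytes 8..15, respectively (see
 * iucv_sever_path() and afiucv_path_connect()) */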

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
        return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:        AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
        struct iucv_sock *iucv;
        struct sock *sk;
        struct hlist_node *node;
        int err = 0;

#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
        read_lock(&iucv_sk_list.lock);
        sk_for_each(sk, node, &iucv_sk_list.head) {
                iucv = iucv_sk(sk);
                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
                switch (sk->sk_state) {
                case IUCV_DISCONN:
                case IUCV_CLOSING:
                case IUCV_CONNECTED:
                        iucv_sever_path(sk, 0);
                        break;
                case IUCV_OPEN:
                case IUCV_BOUND:
                case IUCV_LISTEN:
                case IUCV_CLOSED:
                default:
                        break;
                }
                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
        }
        read_unlock(&iucv_sk_list.lock);
        return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:        AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
        struct sock *sk;
        struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
        read_lock(&iucv_sk_list.lock);
        sk_for_each(sk, node, &iucv_sk_list.head) {
                switch (sk->sk_state) {
                case IUCV_CONNECTED:
                        sk->sk_err = EPIPE;
                        sk->sk_state = IUCV_DISCONN;
                        sk->sk_state_change(sk);
                        break;
                case IUCV_DISCONN:
                case IUCV_CLOSING:
                case IUCV_LISTEN:
                case IUCV_BOUND:
                case IUCV_OPEN:
                default:
                        break;
                }
        }
        read_unlock(&iucv_sk_list.lock);
        return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
        .prepare = afiucv_pm_prepare,
        .complete = afiucv_pm_complete,
        .freeze = afiucv_pm_freeze,
        .thaw = afiucv_pm_restore_thaw,
        .restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
        .owner = THIS_MODULE,
        .name = "afiucv",
        .bus  = NULL,
        .pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:        Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg,
 * whether the data is stored in a buffer or in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *      PRMDATA[0..6]   socket data (max 7 bytes);
 *      PRMDATA[7]      socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); in that case the function
 * returns 8.
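 * Example: PRMDATA[7] == 0xf9 encodes a socket data length of
 * 0xff - 0xf9 = 6 bytes.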
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
        size_t datalen;

        if (msg->flags & IUCV_IPRMDATA) {
                datalen = 0xff - msg->rmmsg[7];
                return (datalen < 8) ? datalen : 8;
        }
        return msg->length;
}

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:         sock structure
 * @state:      first iucv sk state
 * @state2:     second iucv sk state
 *
 * Returns true if the socket is in either the first or the second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
        return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:         sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        if (sk->sk_state != IUCV_CONNECTED)
                return 1;
        if (iucv->transport == AF_IUCV_TRANS_IUCV)
                return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
        else
                return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
                        (atomic_read(&iucv->pendings) <= 0));
}
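/* note: flow control differs by transport: the z/VM IUCV transport is
 * bounded by the path's message limit, whereas the HiperSockets transport
 * is bounded by the window announced by the peer (msglimit_peer) and waits
 * for pending tx notifications to drain */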

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                   struct sk_buff *skb, u8 flags)
{
        struct iucv_sock *iucv = iucv_sk(sock);
        struct af_iucv_trans_hdr *phs_hdr;
        struct sk_buff *nskb;
        int err, confirm_recv = 0;

        memset(skb->head, 0, ETH_HLEN);
        phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
                                        sizeof(struct af_iucv_trans_hdr));
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

        phs_hdr->magic = ETH_P_AF_IUCV;
        phs_hdr->version = 1;
        phs_hdr->flags = flags;
        if (flags == AF_IUCV_FLAG_SYN)
                phs_hdr->window = iucv->msglimit;
        else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
                confirm_recv = atomic_read(&iucv->msg_recv);
                phs_hdr->window = confirm_recv;
                if (confirm_recv)
                        phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
        }
        memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
        memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
        memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
        memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
        ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
        ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
        ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
        ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
        if (imsg)
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

        skb->dev = iucv->hs_dev;
        if (!skb->dev)
                return -ENODEV;
        if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
                return -ENETDOWN;
        if (skb->len > skb->dev->mtu) {
                if (sock->sk_type == SOCK_SEQPACKET)
                        return -EMSGSIZE;
                else
                        skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = ETH_P_AF_IUCV;
        skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return -ENOMEM;
        skb_queue_tail(&iucv->send_skb_q, nskb);
        err = dev_queue_xmit(skb);
        if (net_xmit_eval(err)) {
                skb_unlink(nskb, &iucv->send_skb_q);
                kfree_skb(nskb);
        } else {
                atomic_sub(confirm_recv, &iucv->msg_recv);
                WARN_ON(atomic_read(&iucv->msg_recv) < 0);
        }
        return net_xmit_eval(err);
}
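/* a clone of every transmitted skb remains on send_skb_q until the device
 * reports the transmission result through the tx notification callback
 * (afiucv_hs_callback_txnotify) */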

static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;
        struct hlist_node *node;

        sk_for_each(sk, node, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_path *path = iucv->path;

        if (iucv->path) {
                iucv->path = NULL;
                if (with_user_data) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        pr_iucv->path_sever(path, user_data);
                } else
                        pr_iucv->path_sever(path, NULL);
                iucv_path_free(path);
        }
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned long timeo;
        int err = 0;
        int blen;
        struct sk_buff *skb;

        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                        /* send fin */
                        blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
                        skb = sock_alloc_send_skb(sk, blen, 1, &err);
                        if (skb) {
                                skb_reserve(skb, blen);
                                err = afiucv_hs_send(NULL, sk, skb,
                                                     AF_IUCV_FLAG_FIN);
                        }
                        sk->sk_state = IUCV_DISCONN;
                        sk->sk_state_change(sk);
                }
        case IUCV_DISCONN:   /* fall through */
                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        iucv_sock_wait(sk,
                                        iucv_sock_in_state(sk, IUCV_CLOSED, 0),
                                        timeo);
                }

        case IUCV_CLOSING:   /* fall through */
                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);

        default:   /* fall through */
                iucv_sever_path(sk, 1);
        }

        if (iucv->hs_dev) {
                dev_put(iucv->hs_dev);
                iucv->hs_dev = NULL;
                sk->sk_bound_dev_if = 0;
        }

        /* mark socket for deletion by iucv_sock_kill() */
        sock_set_flag(sk, SOCK_ZAPPED);

        release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent)
                sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
        struct sock *sk;
        struct iucv_sock *iucv;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
        if (!sk)
                return NULL;
        iucv = iucv_sk(sk);

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv->accept_q);
        spin_lock_init(&iucv->accept_q_lock);
        skb_queue_head_init(&iucv->send_skb_q);
        INIT_LIST_HEAD(&iucv->message_q.list);
        spin_lock_init(&iucv->message_q.lock);
        skb_queue_head_init(&iucv->backlog_skb_q);
        iucv->send_tag = 0;
        atomic_set(&iucv->pendings, 0);
        iucv->flags = 0;
        iucv->msglimit = 0;
        atomic_set(&iucv->msg_sent, 0);
        atomic_set(&iucv->msg_recv, 0);
        iucv->path = NULL;
        iucv->sk_txnotify = afiucv_hs_callback_txnotify;
        memset(&iucv->src_user_id, 0, 32);
        if (pr_iucv)
                iucv->transport = AF_IUCV_TRANS_IUCV;
        else
                iucv->transport = AF_IUCV_TRANS_HIPER;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
        sk->sk_allocation = GFP_DMA;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
                            int kern)
{
        struct sock *sk;

        if (protocol && protocol != PF_IUCV)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        switch (sock->type) {
        case SOCK_STREAM:
                sock->ops = &iucv_sock_ops;
                break;
        case SOCK_SEQPACKET:
                /* currently, proto ops can handle both sk types */
                sock->ops = &iucv_sock_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        sk_acceptq_removed(iucv_sk(sk)->parent);
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        iucv_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_DISCONN ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err = 0;
        struct net_device *dev;
        char uid[9];

        /* Verify the input sockaddr */
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path)
                goto done_unlock;

        /* Bind the socket */
        if (pr_iucv)
                if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
                        goto vm_bind; /* VM IUCV transport */

        /* try hiper transport */
        memcpy(uid, sa->siucv_user_id, sizeof(uid));
        ASCEBC(uid, 8);
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
                if (!memcmp(dev->perm_addr, uid, 8)) {
                        memcpy(iucv->src_name, sa->siucv_name, 8);
                        memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
                        sk->sk_bound_dev_if = dev->ifindex;
                        iucv->hs_dev = dev;
                        dev_hold(dev);
                        sk->sk_state = IUCV_BOUND;
                        iucv->transport = AF_IUCV_TRANS_HIPER;
                        if (!iucv->msglimit)
                                iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
                        rcu_read_unlock();
                        goto done_unlock;
                }
        }
        rcu_read_unlock();
vm_bind:
        if (pr_iucv) {
                /* use local userid for backward compat */
                memcpy(iucv->src_name, sa->siucv_name, 8);
                memcpy(iucv->src_user_id, iucv_userid, 8);
                sk->sk_state = IUCV_BOUND;
                iucv->transport = AF_IUCV_TRANS_IUCV;
                if (!iucv->msglimit)
                        iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
                goto done_unlock;
        }
        /* found no dev to bind */
        err = -ENODEV;
done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        char name[12];
        int err = 0;

        if (unlikely(!pr_iucv))
                return -EPROTO;

        memcpy(iucv->src_user_id, iucv_userid, 8);

        write_lock_bh(&iucv_sk_list.lock);

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }

        write_unlock_bh(&iucv_sk_list.lock);

        memcpy(&iucv->src_name, name, 8);

        if (!iucv->msglimit)
                iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

        return err;
}

static int afiucv_hs_connect(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
        int err = 0;

        /* send syn */
        skb = sock_alloc_send_skb(sk, blen, 1, &err);
        if (!skb) {
                err = -ENOMEM;
                goto done;
        }
        skb->dev = NULL;
        skb_reserve(skb, blen);
        err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
done:
        return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned char user_data[16];
        int err;

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv->src_name);
        ASCEBC(user_data, sizeof(user_data));

        /* Create path. */
        iucv->path = iucv_path_alloc(iucv->msglimit,
                                     IUCV_IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
                                    sa->siucv_user_id, NULL, user_data,
                                    sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                switch (err) {
                case 0x0b:      /* Target communicator is not logged on */
                        err = -ENETUNREACH;
                        break;
                case 0x0d:      /* Max connections for this guest exceeded */
                case 0x0e:      /* Max connections for target guest exceeded */
                        err = -EAGAIN;
                        break;
                case 0x0f:      /* Missing IUCV authorization */
                        err = -EACCES;
                        break;
                default:
                        err = -ECONNREFUSED;
                        break;
                }
        }
done:
        return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;

        if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_state == IUCV_OPEN &&
            iucv->transport == AF_IUCV_TRANS_HIPER)
                return -EBADFD; /* explicit bind required */

        if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
                return -EINVAL;

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv->dst_name, sa->siucv_name, 8);

        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                err = afiucv_hs_connect(sock);
        else
                err = afiucv_path_connect(sock, addr);
        if (err)
                goto done;

        if (sk->sk_state != IUCV_CONNECTED)
                err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
                                                            IUCV_DISCONN),
                                     sock_sndtimeo(sk, flags & O_NONBLOCK));

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
                err = -ECONNREFUSED;

        if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
                iucv_sever_path(sk, 0);

done:
        release_sock(sk);
        return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND)
                goto done;

        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int *len, int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);

        addr->sa_family = AF_IUCV;
        *len = sizeof(struct sockaddr_iucv);

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
                memcpy(siucv->siucv_name, iucv->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:       IUCV path
 * @msg:        Pointer to a struct iucv_message
 * @skb:        The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list of the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data length at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
                          struct sk_buff *skb)
{
        u8 prmdata[8];

        memcpy(prmdata, (void *) skb->data, skb->len);
        prmdata[7] = 0xff - (u8) skb->len;
        return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
                                 (void *) prmdata, 8);
}
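/* e.g. a 5-byte payload occupies prmdata[0..4] and prmdata[7] is set to
 * 0xff - 5 = 0xfa; this helper is only used when IUCV_IPRMDATA is set on
 * both the path and the socket (see the flag checks in iucv_sock_sendmsg()) */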

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct iucv_message txmsg;
        struct cmsghdr *cmsg;
        int cmsg_done;
        long timeo;
        char user_id[9];
        char appl_id[9];
        int err;
        int noblock = msg->msg_flags & MSG_DONTWAIT;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        /* SOCK_SEQPACKET: we do not support segmented records */
        if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        /* Return if the socket is not in connected state */
        if (sk->sk_state != IUCV_CONNECTED) {
                err = -ENOTCONN;
                goto out;
        }

        /* initialize defaults */
        cmsg_done   = 0;        /* check for duplicate headers */
        txmsg.class = 0;

        /* iterate over control messages */
        for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
                cmsg = CMSG_NXTHDR(msg, cmsg)) {

                if (!CMSG_OK(msg, cmsg)) {
                        err = -EINVAL;
                        goto out;
                }

                if (cmsg->cmsg_level != SOL_IUCV)
                        continue;

                if (cmsg->cmsg_type & cmsg_done) {
                        err = -EINVAL;
                        goto out;
                }
                cmsg_done |= cmsg->cmsg_type;

                switch (cmsg->cmsg_type) {
                case SCM_IUCV_TRGCLS:
                        if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
                                err = -EINVAL;
                                goto out;
                        }

                        /* set iucv message target class */
                        memcpy(&txmsg.class,
                                (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

                        break;

                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        /* allocate one skb for each iucv message:
         * this is fine for SOCK_SEQPACKET (unless we want to support
         * segmented records using the MSG_EOR flag), but
         * for SOCK_STREAM we might want to improve it in the future */
        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                skb = sock_alloc_send_skb(sk,
                        len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
                        noblock, &err);
        else
                skb = sock_alloc_send_skb(sk, len, noblock, &err);
        if (!skb) {
                err = -ENOMEM;
                goto out;
        }
        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                err = -EFAULT;
                goto fail;
        }

        /* wait until the number of outstanding messages for the iucv path
         * drops below the message limit */
        timeo = sock_sndtimeo(sk, noblock);
        err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
        if (err)
                goto fail;

        /* return -ECONNRESET if the socket is no longer connected */
        if (sk->sk_state != IUCV_CONNECTED) {
                err = -ECONNRESET;
                goto fail;
        }

        /* increment and save iucv message tag for msg_completion cbk */
        txmsg.tag = iucv->send_tag++;
        memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);

        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                atomic_inc(&iucv->msg_sent);
                err = afiucv_hs_send(&txmsg, sk, skb, 0);
                if (err) {
                        atomic_dec(&iucv->msg_sent);
                        goto fail;
                }
                goto release;
        }
        skb_queue_tail(&iucv->send_skb_q, skb);

        if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
              && skb->len <= 7) {
                err = iucv_send_iprm(iucv->path, &txmsg, skb);

                /* on success: there is no message_complete callback
                 * for an IPRMDATA msg; remove skb from send queue */
                if (err == 0) {
                        skb_unlink(skb, &iucv->send_skb_q);
                        kfree_skb(skb);
                }

                /* this error should never happen since the
                 * IUCV_IPRMDATA path flag is set... sever path */
                if (err == 0x15) {
                        pr_iucv->path_sever(iucv->path, NULL);
                        skb_unlink(skb, &iucv->send_skb_q);
                        err = -EPIPE;
                        goto fail;
                }
        } else
                err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
                                        (void *) skb->data, skb->len);
        if (err) {
                if (err == 3) {
                        user_id[8] = 0;
                        memcpy(user_id, iucv->dst_user_id, 8);
                        appl_id[8] = 0;
                        memcpy(appl_id, iucv->dst_name, 8);
                        pr_err("Application %s on z/VM guest %s"
                                " exceeds message limit\n",
                                appl_id, user_id);
                        err = -EAGAIN;
                } else
                        err = -EPIPE;
                skb_unlink(skb, &iucv->send_skb_q);
                goto fail;
        }

release:
        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}

/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
        int dataleft, size, copied = 0;
        struct sk_buff *nskb;

        dataleft = len;
        while (dataleft) {
                if (dataleft >= sk->sk_rcvbuf / 4)
                        size = sk->sk_rcvbuf / 4;
                else
                        size = dataleft;

                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
                if (!nskb)
                        return -ENOMEM;

                /* copy target class to control buffer of new skb */
                memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

                /* copy data fragment */
                memcpy(nskb->data, skb->data + copied, size);
                copied += size;
                dataleft -= size;

                skb_reset_transport_header(nskb);
                skb_reset_network_header(nskb);
                nskb->len = size;

                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
        }

        return 0;
}
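/* the sk->sk_rcvbuf / 4 fragment size matches the truesize check in
 * iucv_process_message(), keeping each skb small relative to the socket's
 * receive buffer */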

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                                 struct iucv_path *path,
                                 struct iucv_message *msg)
{
        int rc;
        unsigned int len;

        len = iucv_msg_length(msg);

        /* store msg target class in the second 4 bytes of skb ctrl buffer */
        /* Note: the first 4 bytes are reserved for msg tag */
        memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

        /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
        if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
                if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
                        skb->data = NULL;
                        skb->len = 0;
                }
        } else {
                rc = pr_iucv->message_receive(path, msg,
                                              msg->flags & IUCV_IPRMDATA,
                                              skb->data, len, NULL);
                if (rc) {
                        kfree_skb(skb);
                        return;
                }
                /* we need to fragment iucv messages for SOCK_STREAM only;
                 * for SOCK_SEQPACKET, it is only relevant if we support
                 * record segmentation using MSG_EOR (see also recvmsg()) */
                if (sk->sk_type == SOCK_STREAM &&
                    skb->truesize >= sk->sk_rcvbuf / 4) {
                        rc = iucv_fragment_skb(sk, skb, len);
                        kfree_skb(skb);
                        skb = NULL;
                        if (rc) {
                                pr_iucv->path_sever(path, NULL);
                                return;
                        }
                        skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
                } else {
                        skb_reset_transport_header(skb);
                        skb_reset_network_header(skb);
                        skb->len = len;
                }
        }

        if (sock_queue_rcv_skb(sk, skb))
                skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *p, *n;

        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
                skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
                if (!skb)
                        break;
                iucv_process_message(sk, skb, p->path, &p->msg);
                list_del(&p->list);
                kfree(p);
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        break;
        }
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned int copied, rlen;
        struct sk_buff *skb, *rskb, *cskb, *sskb;
        int blen;
        int err = 0;

        if ((sk->sk_state == IUCV_DISCONN) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
                return 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        /* receive/dequeue next skb:
         * the function understands MSG_PEEK and, thus, does not dequeue the
         * skb in that case */
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        rlen   = skb->len;              /* real length of skb */
        copied = min_t(unsigned int, rlen, len);

        cskb = skb;
        if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return -EFAULT;
        }

        /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
        if (sk->sk_type == SOCK_SEQPACKET) {
                if (copied < rlen)
                        msg->msg_flags |= MSG_TRUNC;
                /* each iucv message contains a complete record */
                msg->msg_flags |= MSG_EOR;
        }

        /* create control message to store iucv msg target class:
         * get the trgcls from the control buffer of the skb due to
         * fragmentation of the original iucv message. */
        err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
                        CB_TRGCLS_LEN, CB_TRGCLS(skb));
        if (err) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return err;
        }

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {

                /* SOCK_STREAM: re-queue skb if it contains unreceived data */
                if (sk->sk_type == SOCK_STREAM) {
                        skb_pull(skb, copied);
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                goto done;
                        }
                }

                kfree_skb(skb);
                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                        atomic_inc(&iucv->msg_recv);
                        if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
                                WARN_ON(1);
                                iucv_sock_close(sk);
                                return -EFAULT;
                        }
                }

                /* Queue backlog skbs */
                spin_lock_bh(&iucv->message_q.lock);
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
                        if (sock_queue_rcv_skb(sk, rskb)) {
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                break;
                        } else {
                                rskb = skb_dequeue(&iucv->backlog_skb_q);
                        }
                }
                if (skb_queue_empty(&iucv->backlog_skb_q)) {
                        if (!list_empty(&iucv->message_q.list))
                                iucv_process_message_q(sk);
                        if (atomic_read(&iucv->msg_recv) >=
                                                        iucv->msglimit / 2) {
                                /* send WIN to peer */
                                blen = sizeof(struct af_iucv_trans_hdr) +
                                        ETH_HLEN;
                                sskb = sock_alloc_send_skb(sk, blen, 1, &err);
                                if (sskb) {
                                        skb_reserve(sskb, blen);
                                        err = afiucv_hs_send(NULL, sk, sskb,
                                                             AF_IUCV_FLAG_WIN);
                                }
                                if (err) {
                                        sk->sk_state = IUCV_DISCONN;
                                        sk->sk_state_change(sk);
                                }
                        }
                }
                spin_unlock_bh(&iucv->message_q.lock);
        }

done:
        /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
        if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
                copied = rlen;

        return copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;

                if (sk->sk_state == IUCV_CONNECTED)
                        return POLLIN | POLLRDNORM;
        }

        return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        sock_poll_wait(file, sk_sleep(sk), wait);

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;

        if (sk->sk_state == IUCV_DISCONN)
                mask |= POLLIN;

        if (sock_writeable(sk) && iucv_below_msglim(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_message txmsg;
        int err = 0;

1511         how++;
1512
1513         if ((how & ~SHUTDOWN_MASK) || !how)
1514                 return -EINVAL;
1515
1516         lock_sock(sk);
1517         switch (sk->sk_state) {
1518         case IUCV_DISCONN:
1519         case IUCV_CLOSING:
1520         case IUCV_CLOSED:
1521                 err = -ENOTCONN;
1522                 goto fail;
1523
1524         default:
1525                 sk->sk_shutdown |= how;
1526                 break;
1527         }
1528
1529         if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
                 sk->sk_state == IUCV_CONNECTED &&
                 iucv->transport == AF_IUCV_TRANS_IUCV) {
                     /* the IPRM shutdown message exists on the z/VM IUCV
                      * transport only and needs an established path; via
                      * HiperSockets there is nothing to send at this point
                      */
1530                 txmsg.class = 0;
1531                 txmsg.tag = 0;
1532                 err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
1533                                         0, (void *) iprm_shutdown, 8);
1534                 if (err) {
1535                         switch (err) {
1536                         case 1:
1537                                 err = -ENOTCONN;
1538                                 break;
1539                         case 2:
1540                                 err = -ECONNRESET;
1541                                 break;
1542                         default:
1543                                 err = -ENOTCONN;
1544                                 break;
1545                         }
1546                 }
1547         }
1548
1549         if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
                     /* quiescing is an operation on the z/VM IUCV path; skip
                      * it if no such path exists (HiperSockets transport or
                      * not yet connected)
                      */
                     if (iucv->transport == AF_IUCV_TRANS_IUCV && iucv->path) {
1550                         err = pr_iucv->path_quiesce(iucv->path, NULL);
1551                         if (err)
1552                                 err = -ENOTCONN;
                     }
1554                 skb_queue_purge(&sk->sk_receive_queue);
1555         }
1556
1557         /* Wake up anyone sleeping in poll */
1558         sk->sk_state_change(sk);
1559
1560 fail:
1561         release_sock(sk);
1562         return err;
1563 }
1564
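/* iucv_sock_release() - release callback; closes the socket, detaches it
 * from its struct socket and frees it via iucv_sock_kill()
 */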
1565 static int iucv_sock_release(struct socket *sock)
1566 {
1567         struct sock *sk = sock->sk;
1569
1570         if (!sk)
1571                 return 0;
1572
1573         iucv_sock_close(sk);
1574
1575         sock_orphan(sk);
1576         iucv_sock_kill(sk);
1577         return 0;
1578 }
1579
1580 /* getsockopt and setsockopt */
1581 static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1582                                 char __user *optval, unsigned int optlen)
1583 {
1584         struct sock *sk = sock->sk;
1585         struct iucv_sock *iucv = iucv_sk(sk);
1586         int val;
1587         int rc;
1588
1589         if (level != SOL_IUCV)
1590                 return -ENOPROTOOPT;
1591
1592         if (optlen < sizeof(int))
1593                 return -EINVAL;
1594
1595         if (get_user(val, (int __user *) optval))
1596                 return -EFAULT;
1597
1598         rc = 0;
1599
1600         lock_sock(sk);
1601         switch (optname) {
1602         case SO_IPRMDATA_MSG:
1603                 if (val)
1604                         iucv->flags |= IUCV_IPRMDATA;
1605                 else
1606                         iucv->flags &= ~IUCV_IPRMDATA;
1607                 break;
1608         case SO_MSGLIMIT:
1609                 switch (sk->sk_state) {
1610                 case IUCV_OPEN:
1611                 case IUCV_BOUND:
1612                         if (val < 1 || val > (u16)(~0))
1613                                 rc = -EINVAL;
1614                         else
1615                                 iucv->msglimit = val;
1616                         break;
1617                 default:
1618                         rc = -EINVAL;
1619                         break;
1620                 }
1621                 break;
1622         default:
1623                 rc = -ENOPROTOOPT;
1624                 break;
1625         }
1626         release_sock(sk);
1627
1628         return rc;
1629 }
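
/*
 * A minimal userspace sketch of SO_MSGLIMIT (a hypothetical snippet; it
 * assumes the SOL_IUCV and SO_MSGLIMIT constants are visible to the
 * application and omits error handling). The option is only accepted
 * while the socket is still in the IUCV_OPEN or IUCV_BOUND state:
 *
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	int limit = 16;		 valid range: 1..65535
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 */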
1630
1631 static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1632                                 char __user *optval, int __user *optlen)
1633 {
1634         struct sock *sk = sock->sk;
1635         struct iucv_sock *iucv = iucv_sk(sk);
1636         int val, len;
1637
1638         if (level != SOL_IUCV)
1639                 return -ENOPROTOOPT;
1640
1641         if (get_user(len, optlen))
1642                 return -EFAULT;
1643
1644         if (len < 0)
1645                 return -EINVAL;
1646
1647         len = min_t(unsigned int, len, sizeof(int));
1648
1649         switch (optname) {
1650         case SO_IPRMDATA_MSG:
1651                 val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1652                 break;
1653         case SO_MSGLIMIT:
1654                 lock_sock(sk);
1655                 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1656                                            : iucv->msglimit;    /* default */
1657                 release_sock(sk);
1658                 break;
1659         default:
1660                 return -ENOPROTOOPT;
1661         }
1662
1663         if (put_user(len, optlen))
1664                 return -EFAULT;
1665         if (copy_to_user(optval, &val, len))
1666                 return -EFAULT;
1667
1668         return 0;
1669 }
1670
1672 /* Callback wrappers - called from iucv base support */
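/* iucv_callback_connreq() - inbound connection request: locate the
 * matching listening socket, clone it, accept or sever the path and
 * enqueue the new socket on the parent's accept queue
 */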
1673 static int iucv_callback_connreq(struct iucv_path *path,
1674                                  u8 ipvmid[8], u8 ipuser[16])
1675 {
1676         unsigned char user_data[16];
1677         unsigned char nuser_data[16];
1678         unsigned char src_name[8];
1679         struct hlist_node *node;
1680         struct sock *sk, *nsk;
1681         struct iucv_sock *iucv, *niucv;
1682         int err;
1683
1684         memcpy(src_name, ipuser, 8);
1685         EBCASC(src_name, 8);
1686         /* Find out if this path belongs to af_iucv. */
1687         read_lock(&iucv_sk_list.lock);
1688         iucv = NULL;
1689         sk = NULL;
1690         sk_for_each(sk, node, &iucv_sk_list.head)
1691                 if (sk->sk_state == IUCV_LISTEN &&
1692                     !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
1693                         /*
1694                          * Found a listening socket with
1695                          * src_name == ipuser[0-7].
1696                          */
1697                         iucv = iucv_sk(sk);
1698                         break;
1699                 }
1700         read_unlock(&iucv_sk_list.lock);
1701         if (!iucv)
1702                 /* No socket found, not one of our paths. */
1703                 return -EINVAL;
1704
1705         bh_lock_sock(sk);
1706
1707         /* Check if parent socket is listening */
1708         low_nmcpy(user_data, iucv->src_name);
1709         high_nmcpy(user_data, iucv->dst_name);
1710         ASCEBC(user_data, sizeof(user_data));
1711         if (sk->sk_state != IUCV_LISTEN) {
1712                 err = pr_iucv->path_sever(path, user_data);
1713                 iucv_path_free(path);
1714                 goto fail;
1715         }
1716
1717         /* Check for backlog size */
1718         if (sk_acceptq_is_full(sk)) {
1719                 err = pr_iucv->path_sever(path, user_data);
1720                 iucv_path_free(path);
1721                 goto fail;
1722         }
1723
1724         /* Create the new socket */
1725         nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1726         if (!nsk) {
1727                 err = pr_iucv->path_sever(path, user_data);
1728                 iucv_path_free(path);
1729                 goto fail;
1730         }
1731
1732         niucv = iucv_sk(nsk);
1733         iucv_sock_init(nsk, sk);
1734
1735         /* Set the new iucv_sock */
1736         memcpy(niucv->dst_name, ipuser + 8, 8);
1737         EBCASC(niucv->dst_name, 8);
1738         memcpy(niucv->dst_user_id, ipvmid, 8);
1739         memcpy(niucv->src_name, iucv->src_name, 8);
1740         memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1741         niucv->path = path;
1742
1743         /* Prepare user data and accept the path */
1744         high_nmcpy(nuser_data, ipuser + 8);
1745         memcpy(nuser_data + 8, niucv->src_name, 8);
1746         ASCEBC(nuser_data + 8, 8);
1747
1748         /* set message limit for path based on msglimit of accepting socket */
1749         niucv->msglimit = iucv->msglimit;
1750         path->msglim = iucv->msglimit;
1751         err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1752         if (err) {
1753                 iucv_sever_path(nsk, 1);
1754                 iucv_sock_kill(nsk);
1755                 goto fail;
1756         }
1757
1758         iucv_accept_enqueue(sk, nsk);
1759
1760         /* Wake up accept */
1761         nsk->sk_state = IUCV_CONNECTED;
1762         sk->sk_data_ready(sk, 1);
1763         err = 0;
1764 fail:
1765         bh_unlock_sock(sk);
1766         return 0;
1767 }
1768
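/* iucv_callback_connack() - the peer accepted our path; mark the socket
 * IUCV_CONNECTED and signal the state change
 */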
1769 static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1770 {
1771         struct sock *sk = path->private;
1772
1773         sk->sk_state = IUCV_CONNECTED;
1774         sk->sk_state_change(sk);
1775 }
1776
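/* iucv_callback_rx() - a message arrived on the path; deliver it directly
 * if the backlog is empty and the receive buffer has room, otherwise park
 * it on message_q for later processing by the receiver
 */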
1777 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1778 {
1779         struct sock *sk = path->private;
1780         struct iucv_sock *iucv = iucv_sk(sk);
1781         struct sk_buff *skb;
1782         struct sock_msg_q *save_msg;
1783         int len;
1784
1785         if (sk->sk_shutdown & RCV_SHUTDOWN) {
1786                 pr_iucv->message_reject(path, msg);
1787                 return;
1788         }
1789
1790         spin_lock(&iucv->message_q.lock);
1791
1792         if (!list_empty(&iucv->message_q.list) ||
1793             !skb_queue_empty(&iucv->backlog_skb_q))
1794                 goto save_message;
1795
1796         len = atomic_read(&sk->sk_rmem_alloc);
1797         len += SKB_TRUESIZE(iucv_msg_length(msg));
1798         if (len > sk->sk_rcvbuf)
1799                 goto save_message;
1800
1801         skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
1802         if (!skb)
1803                 goto save_message;
1804
1805         iucv_process_message(sk, skb, path, msg);
1806         goto out_unlock;
1807
1808 save_message:
1809         save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1810         if (!save_msg)
1811                 goto out_unlock;
1812         save_msg->path = path;
1813         save_msg->msg = *msg;
1814
1815         list_add_tail(&save_msg->list, &iucv->message_q.list);
1816
1817 out_unlock:
1818         spin_unlock(&iucv->message_q.lock);
1819 }
1820
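/* iucv_callback_txdone() - the peer consumed a message; unlink the skb
 * with the matching message tag from send_skb_q and wake up senders
 * blocked on the message limit
 */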
1821 static void iucv_callback_txdone(struct iucv_path *path,
1822                                  struct iucv_message *msg)
1823 {
1824         struct sock *sk = path->private;
1825         struct sk_buff *this = NULL;
1826         struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
1827         struct sk_buff *list_skb = list->next;
1828         unsigned long flags;
1829
1830         bh_lock_sock(sk);
1831         if (!skb_queue_empty(list)) {
1832                 spin_lock_irqsave(&list->lock, flags);
1833
1834                 while (list_skb != (struct sk_buff *)list) {
1835                         if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
1836                                 this = list_skb;
1837                                 break;
1838                         }
1839                         list_skb = list_skb->next;
1840                 }
1841                 if (this)
1842                         __skb_unlink(this, list);
1843
1844                 spin_unlock_irqrestore(&list->lock, flags);
1845
1846                 if (this) {
1847                         kfree_skb(this);
1848                         /* wake up any process waiting for sending */
1849                         iucv_sock_wake_msglim(sk);
1850                 }
1851         }
1852
1853         if (sk->sk_state == IUCV_CLOSING) {
1854                 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1855                         sk->sk_state = IUCV_CLOSED;
1856                         sk->sk_state_change(sk);
1857                 }
1858         }
1859         bh_unlock_sock(sk);
1861 }
1862
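/* iucv_callback_connrej() - the peer severed the path; sever our end and
 * move the socket to IUCV_DISCONN
 */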
1863 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1864 {
1865         struct sock *sk = path->private;
1866
1867         if (sk->sk_state == IUCV_CLOSED)
1868                 return;
1869
1870         bh_lock_sock(sk);
1871         iucv_sever_path(sk, 1);
1872         sk->sk_state = IUCV_DISCONN;
1873
1874         sk->sk_state_change(sk);
1875         bh_unlock_sock(sk);
1876 }
1877
1878 /* called if the other communication side shuts down its RECV direction;
1879  * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1880  */
1881 static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1882 {
1883         struct sock *sk = path->private;
1884
1885         bh_lock_sock(sk);
1886         if (sk->sk_state != IUCV_CLOSED) {
1887                 sk->sk_shutdown |= SEND_SHUTDOWN;
1888                 sk->sk_state_change(sk);
1889         }
1890         bh_unlock_sock(sk);
1891 }
1892
1893 /***************** HiperSockets transport callbacks ********************/
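/* afiucv_swap_src_dest() - turn a received transport header into a reply
 * header: convert the name fields back to EBCDIC, swap the source and
 * destination fields and prepend a zeroed ethernet header
 */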
1894 static void afiucv_swap_src_dest(struct sk_buff *skb)
1895 {
1896         struct af_iucv_trans_hdr *trans_hdr =
1897                                 (struct af_iucv_trans_hdr *)skb->data;
1898         char tmpID[8];
1899         char tmpName[8];
1900
1901         ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1902         ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1903         ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1904         ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1905         memcpy(tmpID, trans_hdr->srcUserID, 8);
1906         memcpy(tmpName, trans_hdr->srcAppName, 8);
1907         memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1908         memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1909         memcpy(trans_hdr->destUserID, tmpID, 8);
1910         memcpy(trans_hdr->destAppName, tmpName, 8);
1911         skb_push(skb, ETH_HLEN);
1912         memset(skb->data, 0, ETH_HLEN);
1913 }
1914
1915 /**
1916  * afiucv_hs_callback_syn - react to a received SYN
1917  **/
1918 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1919 {
1920         struct sock *nsk;
1921         struct iucv_sock *iucv, *niucv;
1922         struct af_iucv_trans_hdr *trans_hdr;
1923         int err;
1924
1925         iucv = iucv_sk(sk);
1926         trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1927         if (!iucv) {
1928                 /* no sock - connection refused */
1929                 afiucv_swap_src_dest(skb);
1930                 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1931                 err = dev_queue_xmit(skb);
1932                 goto out;
1933         }
1934
1935         nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1936         bh_lock_sock(sk);
1937         if ((sk->sk_state != IUCV_LISTEN) ||
1938             sk_acceptq_is_full(sk) ||
1939             !nsk) {
1940                 /* error on server socket - connection refused */
1941                 if (nsk)
1942                         sk_free(nsk);
1943                 afiucv_swap_src_dest(skb);
1944                 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1945                 err = dev_queue_xmit(skb);
1946                 bh_unlock_sock(sk);
1947                 goto out;
1948         }
1949
1950         niucv = iucv_sk(nsk);
1951         iucv_sock_init(nsk, sk);
1952         niucv->transport = AF_IUCV_TRANS_HIPER;
1953         niucv->msglimit = iucv->msglimit;
1954         if (!trans_hdr->window)
1955                 niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1956         else
1957                 niucv->msglimit_peer = trans_hdr->window;
1958         memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1959         memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1960         memcpy(niucv->src_name, iucv->src_name, 8);
1961         memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1962         nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1963         niucv->hs_dev = iucv->hs_dev;
1964         dev_hold(niucv->hs_dev);
1965         afiucv_swap_src_dest(skb);
1966         trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1967         trans_hdr->window = niucv->msglimit;
1968         /* once the receiver acks the xmit, the connection is established */
1969         err = dev_queue_xmit(skb);
1970         if (!err) {
1971                 iucv_accept_enqueue(sk, nsk);
1972                 nsk->sk_state = IUCV_CONNECTED;
1973                 sk->sk_data_ready(sk, 1);
1974         } else
1975                 iucv_sock_kill(nsk);
1976         bh_unlock_sock(sk);
1977
1978 out:
1979         return NET_RX_SUCCESS;
1980 }
1981
1982 /**
1983  * afiucv_hs_callback_synack() - react to a received SYN-ACK
1984  **/
1985 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
1986 {
1987         struct iucv_sock *iucv = iucv_sk(sk);
1988         struct af_iucv_trans_hdr *trans_hdr =
1989                                         (struct af_iucv_trans_hdr *)skb->data;
1990
1991         if (!iucv)
1992                 goto out;
1993         if (sk->sk_state != IUCV_BOUND)
1994                 goto out;
1995         bh_lock_sock(sk);
1996         iucv->msglimit_peer = trans_hdr->window;
1997         sk->sk_state = IUCV_CONNECTED;
1998         sk->sk_state_change(sk);
1999         bh_unlock_sock(sk);
2000 out:
2001         kfree_skb(skb);
2002         return NET_RX_SUCCESS;
2003 }
2004
2005 /**
2006  * afiucv_hs_callback_synfin() - react to a received SYN|FIN
2007  **/
2008 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2009 {
2010         struct iucv_sock *iucv = iucv_sk(sk);
2011
2012         if (!iucv)
2013                 goto out;
2014         if (sk->sk_state != IUCV_BOUND)
2015                 goto out;
2016         bh_lock_sock(sk);
2017         sk->sk_state = IUCV_DISCONN;
2018         sk->sk_state_change(sk);
2019         bh_unlock_sock(sk);
2020 out:
2021         kfree_skb(skb);
2022         return NET_RX_SUCCESS;
2023 }
2024
2025 /**
2026  * afiucv_hs_callback_fin() - react to a received FIN
2027  **/
2028 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2029 {
2030         struct iucv_sock *iucv = iucv_sk(sk);
2031
2032         /* other end of connection closed */
2033         if (!iucv)
2034                 goto out;
2035         bh_lock_sock(sk);
2036         if (sk->sk_state == IUCV_CONNECTED) {
2037                 sk->sk_state = IUCV_DISCONN;
2038                 sk->sk_state_change(sk);
2039         }
2040         bh_unlock_sock(sk);
2041 out:
2042         kfree_skb(skb);
2043         return NET_RX_SUCCESS;
2044 }
2045
2046 /**
2047  * afiucv_hs_callback_win() - react to a received WIN
2048  **/
2049 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2050 {
2051         struct iucv_sock *iucv = iucv_sk(sk);
2052         struct af_iucv_trans_hdr *trans_hdr =
2053                                         (struct af_iucv_trans_hdr *)skb->data;
2054
2055         if (!iucv)
2056                 return NET_RX_SUCCESS;
2057
2058         if (sk->sk_state != IUCV_CONNECTED)
2059                 return NET_RX_SUCCESS;
2060
2061         atomic_sub(trans_hdr->window, &iucv->msg_sent);
2062         iucv_sock_wake_msglim(sk);
2063         return NET_RX_SUCCESS;
2064 }
2065
2066 /**
2067  * afiucv_hs_callback_rx() - react to received data
2068  **/
2069 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2070 {
2071         struct iucv_sock *iucv = iucv_sk(sk);
2072
2073         if (!iucv) {
2074                 kfree_skb(skb);
2075                 return NET_RX_SUCCESS;
2076         }
2077
2078         if (sk->sk_state != IUCV_CONNECTED) {
2079                 kfree_skb(skb);
2080                 return NET_RX_SUCCESS;
2081         }
2082
2083         /* drop frames that carry no payload beyond the transport header */
2084         if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
2085                 kfree_skb(skb);
2086                 return NET_RX_SUCCESS;
2087         }
2088         skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2089         skb_reset_transport_header(skb);
2090         skb_reset_network_header(skb);
2091         spin_lock(&iucv->message_q.lock);
2092         if (skb_queue_empty(&iucv->backlog_skb_q)) {
2093                 if (sock_queue_rcv_skb(sk, skb)) {
2094                         /* handle rcv queue full */
2095                         skb_queue_tail(&iucv->backlog_skb_q, skb);
2096                 }
2097         } else
2098                 skb_queue_tail(&iucv->backlog_skb_q, skb);
2099         spin_unlock(&iucv->message_q.lock);
2100         return NET_RX_SUCCESS;
2101 }
2102
2103 /**
2104  * afiucv_hs_rcv() - base function for data arriving through the
2105  *                   HiperSockets transport; called from the netif RX
2106  *                   softirq
2107  **/
2108 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2109         struct packet_type *pt, struct net_device *orig_dev)
2110 {
2111         struct hlist_node *node;
2112         struct sock *sk;
2113         struct iucv_sock *iucv;
2114         struct af_iucv_trans_hdr *trans_hdr;
2115         char nullstring[8];
2116         int err = 0;
2117
2118         skb_pull(skb, ETH_HLEN);
2119         trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
2120         EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2121         EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2122         EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2123         EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2124         memset(nullstring, 0, sizeof(nullstring));
2125         iucv = NULL;
2126         sk = NULL;
2127         read_lock(&iucv_sk_list.lock);
2128         sk_for_each(sk, node, &iucv_sk_list.head) {
2129                 if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2130                         if ((!memcmp(&iucv_sk(sk)->src_name,
2131                                      trans_hdr->destAppName, 8)) &&
2132                             (!memcmp(&iucv_sk(sk)->src_user_id,
2133                                      trans_hdr->destUserID, 8)) &&
2134                             (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2135                             (!memcmp(&iucv_sk(sk)->dst_user_id,
2136                                      nullstring, 8))) {
2137                                 iucv = iucv_sk(sk);
2138                                 break;
2139                         }
2140                 } else {
2141                         if ((!memcmp(&iucv_sk(sk)->src_name,
2142                                      trans_hdr->destAppName, 8)) &&
2143                             (!memcmp(&iucv_sk(sk)->src_user_id,
2144                                      trans_hdr->destUserID, 8)) &&
2145                             (!memcmp(&iucv_sk(sk)->dst_name,
2146                                      trans_hdr->srcAppName, 8)) &&
2147                             (!memcmp(&iucv_sk(sk)->dst_user_id,
2148                                      trans_hdr->srcUserID, 8))) {
2149                                 iucv = iucv_sk(sk);
2150                                 break;
2151                         }
2152                 }
2153         }
2154         read_unlock(&iucv_sk_list.lock);
2155         if (!iucv)
2156                 sk = NULL;
2157
2158         /* TODO: no sock was found - how should we reply?
2159          * 1) send without a sock, with no send rc checking?
2160          * 2) introduce a default sock to handle these cases?
2161          *
2162          * SYN -> send SYN|ACK in the good case, SYN|FIN in the bad case
2163          * data -> send FIN
2164          * SYN|ACK, SYN|FIN, FIN -> no action?
2165          */
2166
2167         switch (trans_hdr->flags) {
2168         case AF_IUCV_FLAG_SYN:
2169                 /* connect request */
2170                 err = afiucv_hs_callback_syn(sk, skb);
2171                 break;
2172         case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2173                 /* connect request confirmed */
2174                 err = afiucv_hs_callback_synack(sk, skb);
2175                 break;
2176         case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2177                 /* connect request refused */
2178                 err = afiucv_hs_callback_synfin(sk, skb);
2179                 break;
2180         case (AF_IUCV_FLAG_FIN):
2181                 /* close request */
2182                 err = afiucv_hs_callback_fin(sk, skb);
2183                 break;
2184         case (AF_IUCV_FLAG_WIN):
2185                 err = afiucv_hs_callback_win(sk, skb);
2186                 if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2187                         kfree_skb(skb);
2188                         break;
2189                 }
2190                 /* fall through */
2191         case 0:
2192                 /* plain data frame */
2193                 memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
2194                        CB_TRGCLS_LEN);
2195                 err = afiucv_hs_callback_rx(sk, skb);
2196                 break;
2197         default:
2198                 kfree_skb(skb);	/* unknown flags: drop to avoid an skb leak */
2199         }
2200
2201         return err;
2202 }
2203
2204 /**
2205  * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2206  *                                 transport
2207  **/
2208 static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2209                                         enum iucv_tx_notify n)
2210 {
2211         struct sock *isk = skb->sk;
2212         struct sock *sk = NULL;
2213         struct iucv_sock *iucv = NULL;
2214         struct sk_buff_head *list;
2215         struct sk_buff *list_skb;
2216         struct sk_buff *nskb;
2217         unsigned long flags;
2218         struct hlist_node *node;
2219
2220         read_lock_irqsave(&iucv_sk_list.lock, flags);
2221         sk_for_each(sk, node, &iucv_sk_list.head)
2222                 if (sk == isk) {
2223                         iucv = iucv_sk(sk);
2224                         break;
2225                 }
2226         read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2227
2228         if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2229                 return;
2230
2231         list = &iucv->send_skb_q;
2232         spin_lock_irqsave(&list->lock, flags);
2233         if (skb_queue_empty(list))
2234                 goto out_unlock;
2235         list_skb = list->next;
2236         nskb = list_skb->next;
2237         while (list_skb != (struct sk_buff *)list) {
2238                 if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2239                         switch (n) {
2240                         case TX_NOTIFY_OK:
2241                                 __skb_unlink(list_skb, list);
2242                                 kfree_skb(list_skb);
2243                                 iucv_sock_wake_msglim(sk);
2244                                 break;
2245                         case TX_NOTIFY_PENDING:
2246                                 atomic_inc(&iucv->pendings);
2247                                 break;
2248                         case TX_NOTIFY_DELAYED_OK:
2249                                 __skb_unlink(list_skb, list);
2250                                 atomic_dec(&iucv->pendings);
2251                                 if (atomic_read(&iucv->pendings) <= 0)
2252                                         iucv_sock_wake_msglim(sk);
2253                                 kfree_skb(list_skb);
2254                                 break;
2255                         case TX_NOTIFY_UNREACHABLE:
2256                         case TX_NOTIFY_DELAYED_UNREACHABLE:
2257                         case TX_NOTIFY_TPQFULL: /* not yet used */
2258                         case TX_NOTIFY_GENERALERROR:
2259                         case TX_NOTIFY_DELAYED_GENERALERROR:
2260                                 __skb_unlink(list_skb, list);
2261                                 kfree_skb(list_skb);
2262                                 if (sk->sk_state == IUCV_CONNECTED) {
2263                                         sk->sk_state = IUCV_DISCONN;
2264                                         sk->sk_state_change(sk);
2265                                 }
2266                                 break;
2267                         }
2268                         break;
2269                 }
2270                 list_skb = nskb;
2271                 nskb = nskb->next;
2272         }
2273 out_unlock:
2274         spin_unlock_irqrestore(&list->lock, flags);
2275
2276         if (sk->sk_state == IUCV_CLOSING) {
2277                 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2278                         sk->sk_state = IUCV_CLOSED;
2279                         sk->sk_state_change(sk);
2280                 }
2281         }
2283 }

2284 static const struct proto_ops iucv_sock_ops = {
2285         .family         = PF_IUCV,
2286         .owner          = THIS_MODULE,
2287         .release        = iucv_sock_release,
2288         .bind           = iucv_sock_bind,
2289         .connect        = iucv_sock_connect,
2290         .listen         = iucv_sock_listen,
2291         .accept         = iucv_sock_accept,
2292         .getname        = iucv_sock_getname,
2293         .sendmsg        = iucv_sock_sendmsg,
2294         .recvmsg        = iucv_sock_recvmsg,
2295         .poll           = iucv_sock_poll,
2296         .ioctl          = sock_no_ioctl,
2297         .mmap           = sock_no_mmap,
2298         .socketpair     = sock_no_socketpair,
2299         .shutdown       = iucv_sock_shutdown,
2300         .setsockopt     = iucv_sock_setsockopt,
2301         .getsockopt     = iucv_sock_getsockopt,
2302 };
2303
2304 static const struct net_proto_family iucv_sock_family_ops = {
2305         .family = AF_IUCV,
2306         .owner  = THIS_MODULE,
2307         .create = iucv_sock_create,
2308 };
2309
2310 static struct packet_type iucv_packet_type = {
2311         .type = cpu_to_be16(ETH_P_AF_IUCV),
2312         .func = afiucv_hs_rcv,
2313 };
2314
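/* afiucv_iucv_init() - register the af_iucv handler with the IUCV base
 * layer and create the dummy "af_iucv" device on the IUCV bus
 */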
2315 static int afiucv_iucv_init(void)
2316 {
2317         int err;
2318
2319         err = pr_iucv->iucv_register(&af_iucv_handler, 0);
2320         if (err)
2321                 goto out;
2322         /* establish dummy device */
2323         af_iucv_driver.bus = pr_iucv->bus;
2324         err = driver_register(&af_iucv_driver);
2325         if (err)
2326                 goto out_iucv;
2327         af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2328         if (!af_iucv_dev) {
2329                 err = -ENOMEM;
2330                 goto out_driver;
2331         }
2332         dev_set_name(af_iucv_dev, "af_iucv");
2333         af_iucv_dev->bus = pr_iucv->bus;
2334         af_iucv_dev->parent = pr_iucv->root;
2335         af_iucv_dev->release = (void (*)(struct device *))kfree;
2336         af_iucv_dev->driver = &af_iucv_driver;
2337         err = device_register(af_iucv_dev);
2338         if (err)
2339                 goto out_driver;
2340         return 0;
2341
2342 out_driver:
2343         driver_unregister(&af_iucv_driver);
2344 out_iucv:
2345         pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2346 out:
2347         return err;
2348 }
2349
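/* afiucv_init() - module init: under z/VM, query the local user id and
 * bind to the iucv_if interface; then register the protocol, the socket
 * family and (for HiperSockets) the ETH_P_AF_IUCV packet type
 */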
2350 static int __init afiucv_init(void)
2351 {
2352         int err;
2353
2354         if (MACHINE_IS_VM) {
2355                 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2356                 if (unlikely(err)) {
2357                         WARN_ON(err);
2358                         err = -EPROTONOSUPPORT;
2359                         goto out;
2360                 }
2361
2362                 pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2363                 if (!pr_iucv) {
2364                         printk(KERN_WARNING "iucv_if lookup failed\n");
2365                         memset(&iucv_userid, 0, sizeof(iucv_userid));
2366                 }
2367         } else {
2368                 memset(&iucv_userid, 0, sizeof(iucv_userid));
2369                 pr_iucv = NULL;
2370         }
2371
2372         err = proto_register(&iucv_proto, 0);
2373         if (err)
2374                 goto out;
2375         err = sock_register(&iucv_sock_family_ops);
2376         if (err)
2377                 goto out_proto;
2378
2379         if (pr_iucv) {
2380                 err = afiucv_iucv_init();
2381                 if (err)
2382                         goto out_sock;
2383         }
2384         dev_add_pack(&iucv_packet_type);
2385         return 0;
2386
2387 out_sock:
2388         sock_unregister(PF_IUCV);
2389 out_proto:
2390         proto_unregister(&iucv_proto);
2391 out:
2392         if (pr_iucv)
2393                 symbol_put(iucv_if);
2394         return err;
2395 }
2396
2397 static void __exit afiucv_exit(void)
2398 {
2399         if (pr_iucv) {
2400                 device_unregister(af_iucv_dev);
2401                 driver_unregister(&af_iucv_driver);
2402                 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2403                 symbol_put(iucv_if);
2404         }
2405         dev_remove_pack(&iucv_packet_type);
2406         sock_unregister(PF_IUCV);
2407         proto_unregister(&iucv_proto);
2408 }
2409
2410 module_init(afiucv_init);
2411 module_exit(afiucv_exit);
2412
2413 MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
2414 MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
2415 MODULE_VERSION(VERSION);
2416 MODULE_LICENSE("GPL");
2417 MODULE_ALIAS_NETPROTO(PF_IUCV);
2418