net/iucv/af_iucv.c (af_iucv: allow retrieval of maximum message size)
1 /*
2  *  IUCV protocol stack for Linux on zSeries
3  *
4  *  Copyright IBM Corp. 2006, 2009
5  *
6  *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
7  *              Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
8  *  PM functions:
9  *              Ursula Braun <ursula.braun@de.ibm.com>
10  */
11
12 #define KMSG_COMPONENT "af_iucv"
13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14
15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/list.h>
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/skbuff.h>
23 #include <linux/init.h>
24 #include <linux/poll.h>
25 #include <net/sock.h>
26 #include <asm/ebcdic.h>
27 #include <asm/cpcmd.h>
28 #include <linux/kmod.h>
29
30 #include <net/iucv/af_iucv.h>
31
32 #define VERSION "1.2"
33
34 static char iucv_userid[80];
35
36 static const struct proto_ops iucv_sock_ops;
37
38 static struct proto iucv_proto = {
39         .name           = "AF_IUCV",
40         .owner          = THIS_MODULE,
41         .obj_size       = sizeof(struct iucv_sock),
42 };
43
44 static struct iucv_interface *pr_iucv;
45
46 /* special AF_IUCV IPRM messages */
47 static const u8 iprm_shutdown[8] =
48         {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
49
50 #define TRGCLS_SIZE     (sizeof(((struct iucv_message *)0)->class))
51
52 /* macros to set/get socket control buffer at correct offset */
53 #define CB_TAG(skb)     ((skb)->cb)             /* iucv message tag */
54 #define CB_TAG_LEN      (sizeof(((struct iucv_message *) 0)->tag))
55 #define CB_TRGCLS(skb)  ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
56 #define CB_TRGCLS_LEN   (TRGCLS_SIZE)
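/*
 * Illustrative sketch (editor addition, not part of the original file): with
 * a 4-byte iucv message tag and a 4-byte target class, the first 8 bytes of
 * skb->cb are used as
 *      cb[0..3]  iucv message tag     -> CB_TAG(skb)
 *      cb[4..7]  iucv target class    -> CB_TRGCLS(skb)
 * For example, iucv_sock_sendmsg() below saves the tag with
 *      memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
 * and iucv_process_message() stores the class via CB_TRGCLS(skb).
 */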
57
58 #define __iucv_sock_wait(sk, condition, timeo, ret)                     \
59 do {                                                                    \
60         DEFINE_WAIT(__wait);                                            \
61         long __timeo = timeo;                                           \
62         ret = 0;                                                        \
63         prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
64         while (!(condition)) {                                          \
65                 if (!__timeo) {                                         \
66                         ret = -EAGAIN;                                  \
67                         break;                                          \
68                 }                                                       \
69                 if (signal_pending(current)) {                          \
70                         ret = sock_intr_errno(__timeo);                 \
71                         break;                                          \
72                 }                                                       \
73                 release_sock(sk);                                       \
74                 __timeo = schedule_timeout(__timeo);                    \
75                 lock_sock(sk);                                          \
76                 ret = sock_error(sk);                                   \
77                 if (ret)                                                \
78                         break;                                          \
79         }                                                               \
80         finish_wait(sk_sleep(sk), &__wait);                             \
81 } while (0)
82
83 #define iucv_sock_wait(sk, condition, timeo)                            \
84 ({                                                                      \
85         int __ret = 0;                                                  \
86         if (!(condition))                                               \
87                 __iucv_sock_wait(sk, condition, timeo, __ret);          \
88         __ret;                                                          \
89 })
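/*
 * Usage sketch (editor addition): iucv_sock_wait() sleeps, dropping and
 * re-acquiring the socket lock, until @condition holds, the timeout expires
 * (-EAGAIN) or a signal is pending.  A typical caller, as in
 * iucv_sock_sendmsg() below, is
 *
 *      timeo = sock_sndtimeo(sk, noblock);
 *      err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
 */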
90
91 static void iucv_sock_kill(struct sock *sk);
92 static void iucv_sock_close(struct sock *sk);
93 static void iucv_sever_path(struct sock *, int);
94
95 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
96         struct packet_type *pt, struct net_device *orig_dev);
97 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
98                    struct sk_buff *skb, u8 flags);
99 static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
100
101 /* Callback functions */
102 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
103 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
104 static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
105 static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
106                                  u8 ipuser[16]);
107 static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
108 static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
109
110 static struct iucv_sock_list iucv_sk_list = {
111         .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
112         .autobind_name = ATOMIC_INIT(0)
113 };
114
115 static struct iucv_handler af_iucv_handler = {
116         .path_pending     = iucv_callback_connreq,
117         .path_complete    = iucv_callback_connack,
118         .path_severed     = iucv_callback_connrej,
119         .message_pending  = iucv_callback_rx,
120         .message_complete = iucv_callback_txdone,
121         .path_quiesced    = iucv_callback_shutdown,
122 };
123
124 static inline void high_nmcpy(unsigned char *dst, char *src)
125 {
126        memcpy(dst, src, 8);
127 }
128
129 static inline void low_nmcpy(unsigned char *dst, char *src)
130 {
131        memcpy(&dst[8], src, 8);
132 }
133
134 static int afiucv_pm_prepare(struct device *dev)
135 {
136 #ifdef CONFIG_PM_DEBUG
137         printk(KERN_WARNING "afiucv_pm_prepare\n");
138 #endif
139         return 0;
140 }
141
142 static void afiucv_pm_complete(struct device *dev)
143 {
144 #ifdef CONFIG_PM_DEBUG
145         printk(KERN_WARNING "afiucv_pm_complete\n");
146 #endif
147 }
148
149 /**
150  * afiucv_pm_freeze() - Freeze PM callback
151  * @dev:        AFIUCV dummy device
152  *
153  * Sever all established IUCV communication paths
154  */
155 static int afiucv_pm_freeze(struct device *dev)
156 {
157         struct iucv_sock *iucv;
158         struct sock *sk;
159         struct hlist_node *node;
160         int err = 0;
161
162 #ifdef CONFIG_PM_DEBUG
163         printk(KERN_WARNING "afiucv_pm_freeze\n");
164 #endif
165         read_lock(&iucv_sk_list.lock);
166         sk_for_each(sk, node, &iucv_sk_list.head) {
167                 iucv = iucv_sk(sk);
168                 skb_queue_purge(&iucv->send_skb_q);
169                 skb_queue_purge(&iucv->backlog_skb_q);
170                 switch (sk->sk_state) {
171                 case IUCV_DISCONN:
172                 case IUCV_CLOSING:
173                 case IUCV_CONNECTED:
174                         iucv_sever_path(sk, 0);
175                         break;
176                 case IUCV_OPEN:
177                 case IUCV_BOUND:
178                 case IUCV_LISTEN:
179                 case IUCV_CLOSED:
180                 default:
181                         break;
182                 }
183                 skb_queue_purge(&iucv->send_skb_q);
184                 skb_queue_purge(&iucv->backlog_skb_q);
185         }
186         read_unlock(&iucv_sk_list.lock);
187         return err;
188 }
189
190 /**
191  * afiucv_pm_restore_thaw() - Thaw and restore PM callback
192  * @dev:        AFIUCV dummy device
193  *
194  * socket clean up after freeze
195  */
196 static int afiucv_pm_restore_thaw(struct device *dev)
197 {
198         struct sock *sk;
199         struct hlist_node *node;
200
201 #ifdef CONFIG_PM_DEBUG
202         printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
203 #endif
204         read_lock(&iucv_sk_list.lock);
205         sk_for_each(sk, node, &iucv_sk_list.head) {
206                 switch (sk->sk_state) {
207                 case IUCV_CONNECTED:
208                         sk->sk_err = EPIPE;
209                         sk->sk_state = IUCV_DISCONN;
210                         sk->sk_state_change(sk);
211                         break;
212                 case IUCV_DISCONN:
213                 case IUCV_CLOSING:
214                 case IUCV_LISTEN:
215                 case IUCV_BOUND:
216                 case IUCV_OPEN:
217                 default:
218                         break;
219                 }
220         }
221         read_unlock(&iucv_sk_list.lock);
222         return 0;
223 }
224
225 static const struct dev_pm_ops afiucv_pm_ops = {
226         .prepare = afiucv_pm_prepare,
227         .complete = afiucv_pm_complete,
228         .freeze = afiucv_pm_freeze,
229         .thaw = afiucv_pm_restore_thaw,
230         .restore = afiucv_pm_restore_thaw,
231 };
232
233 static struct device_driver af_iucv_driver = {
234         .owner = THIS_MODULE,
235         .name = "afiucv",
236         .bus  = NULL,
237         .pm   = &afiucv_pm_ops,
238 };
239
240 /* dummy device used as trigger for PM functions */
241 static struct device *af_iucv_dev;
242
243 /**
244  * iucv_msg_length() - Returns the length of an iucv message.
245  * @msg:        Pointer to struct iucv_message, MUST NOT be NULL
246  *
247  * The function returns the length of the specified iucv message @msg, i.e. the
248  * length of the data stored in a buffer or in the parameter list (PRMDATA).
249  *
250  * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
251  * data:
252  *      PRMDATA[0..6]   socket data (max 7 bytes);
253  *      PRMDATA[7]      socket data length value (len is 0xff - PRMDATA[7])
254  *
255  * The socket data length is computed by subtracting the socket data length
256  * value from 0xFF.
257  * If the socket data length is greater than 7, then PRMDATA can be used for
258  * special notifications (see iucv_sock_shutdown); and further,
259  * if the socket data length is > 7, the function returns 8.
260  *
261  * Use this function to allocate socket buffers to store iucv message data.
262  */
263 static inline size_t iucv_msg_length(struct iucv_message *msg)
264 {
265         size_t datalen;
266
267         if (msg->flags & IUCV_IPRMDATA) {
268                 datalen = 0xff - msg->rmmsg[7];
269                 return (datalen < 8) ? datalen : 8;
270         }
271         return msg->length;
272 }
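/*
 * Worked example (editor addition): an IPRM message carrying 5 bytes of
 * socket data has rmmsg[7] = 0xff - 5 = 0xfa, so iucv_msg_length() returns
 * 0xff - 0xfa = 5.  The special iprm_shutdown message above has
 * rmmsg[7] = 0x01, giving 0xff - 0x01 = 0xfe, which is capped to 8.
 */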
273
274 /**
275  * iucv_sock_in_state() - check for specific states
276  * @sk:         sock structure
277  * @state:      first iucv sk state
278  * @state2:     second iucv sk state
279  *
280  * Returns true if the socket is in either the first or the second state.
281  */
282 static int iucv_sock_in_state(struct sock *sk, int state, int state2)
283 {
284         return (sk->sk_state == state || sk->sk_state == state2);
285 }
286
287 /**
288  * iucv_below_msglim() - function to check if messages can be sent
289  * @sk:         sock structure
290  *
291  * Returns true if the send queue length is lower than the message limit.
292  * Always returns true if the socket is not connected (no iucv path for
293  * checking the message limit).
294  */
295 static inline int iucv_below_msglim(struct sock *sk)
296 {
297         struct iucv_sock *iucv = iucv_sk(sk);
298
299         if (sk->sk_state != IUCV_CONNECTED)
300                 return 1;
301         if (iucv->transport == AF_IUCV_TRANS_IUCV)
302                 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
303         else
304                 return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
305                         (atomic_read(&iucv->pendings) <= 0));
306 }
307
308 /**
309  * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
310  */
311 static void iucv_sock_wake_msglim(struct sock *sk)
312 {
313         struct socket_wq *wq;
314
315         rcu_read_lock();
316         wq = rcu_dereference(sk->sk_wq);
317         if (wq_has_sleeper(wq))
318                 wake_up_interruptible_all(&wq->wait);
319         sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
320         rcu_read_unlock();
321 }
322
323 /**
324  * afiucv_hs_send() - send a message through HiperSockets transport
325  */
326 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
327                    struct sk_buff *skb, u8 flags)
328 {
329         struct iucv_sock *iucv = iucv_sk(sock);
330         struct af_iucv_trans_hdr *phs_hdr;
331         struct sk_buff *nskb;
332         int err, confirm_recv = 0;
333
334         memset(skb->head, 0, ETH_HLEN);
335         phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
336                                         sizeof(struct af_iucv_trans_hdr));
337         skb_reset_mac_header(skb);
338         skb_reset_network_header(skb);
339         skb_push(skb, ETH_HLEN);
340         skb_reset_mac_header(skb);
341         memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
342
343         phs_hdr->magic = ETH_P_AF_IUCV;
344         phs_hdr->version = 1;
345         phs_hdr->flags = flags;
346         if (flags == AF_IUCV_FLAG_SYN)
347                 phs_hdr->window = iucv->msglimit;
348         else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
349                 confirm_recv = atomic_read(&iucv->msg_recv);
350                 phs_hdr->window = confirm_recv;
351                 if (confirm_recv)
352                         phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
353         }
354         memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
355         memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
356         memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
357         memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
358         ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
359         ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
360         ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
361         ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
362         if (imsg)
363                 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
364
365         skb->dev = iucv->hs_dev;
366         if (!skb->dev)
367                 return -ENODEV;
368         if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
369                 return -ENETDOWN;
370         if (skb->len > skb->dev->mtu) {
371                 if (sock->sk_type == SOCK_SEQPACKET)
372                         return -EMSGSIZE;
373                 else
374                         skb_trim(skb, skb->dev->mtu);
375         }
376         skb->protocol = ETH_P_AF_IUCV;
377         skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
378         nskb = skb_clone(skb, GFP_ATOMIC);
379         if (!nskb)
380                 return -ENOMEM;
381         skb_queue_tail(&iucv->send_skb_q, nskb);
382         err = dev_queue_xmit(skb);
383         if (net_xmit_eval(err)) {
384                 skb_unlink(nskb, &iucv->send_skb_q);
385                 kfree_skb(nskb);
386         } else {
387                 atomic_sub(confirm_recv, &iucv->msg_recv);
388                 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
389         }
390         return net_xmit_eval(err);
391 }
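/*
 * Frame layout sketch (editor addition, derived from the skb_push() calls
 * above); the transmitted skb looks like
 *
 *      | ETH_HLEN bytes (zeroed) | af_iucv_trans_hdr | payload |
 *
 * where the transport header carries magic/version/flags/window plus the
 * EBCDIC source and destination user ids and application names, and an
 * embedded struct iucv_message if @imsg was given.  A clone of the skb stays
 * on send_skb_q, presumably until afiucv_hs_callback_txnotify() handles the
 * transmit notification.
 */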
392
393 static struct sock *__iucv_get_sock_by_name(char *nm)
394 {
395         struct sock *sk;
396         struct hlist_node *node;
397
398         sk_for_each(sk, node, &iucv_sk_list.head)
399                 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
400                         return sk;
401
402         return NULL;
403 }
404
405 static void iucv_sock_destruct(struct sock *sk)
406 {
407         skb_queue_purge(&sk->sk_receive_queue);
408         skb_queue_purge(&sk->sk_write_queue);
409 }
410
411 /* Cleanup Listen */
412 static void iucv_sock_cleanup_listen(struct sock *parent)
413 {
414         struct sock *sk;
415
416         /* Close non-accepted connections */
417         while ((sk = iucv_accept_dequeue(parent, NULL))) {
418                 iucv_sock_close(sk);
419                 iucv_sock_kill(sk);
420         }
421
422         parent->sk_state = IUCV_CLOSED;
423 }
424
425 /* Kill socket (only if zapped and orphaned) */
426 static void iucv_sock_kill(struct sock *sk)
427 {
428         if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
429                 return;
430
431         iucv_sock_unlink(&iucv_sk_list, sk);
432         sock_set_flag(sk, SOCK_DEAD);
433         sock_put(sk);
434 }
435
436 /* Terminate an IUCV path */
437 static void iucv_sever_path(struct sock *sk, int with_user_data)
438 {
439         unsigned char user_data[16];
440         struct iucv_sock *iucv = iucv_sk(sk);
441         struct iucv_path *path = iucv->path;
442
443         if (iucv->path) {
444                 iucv->path = NULL;
445                 if (with_user_data) {
446                         low_nmcpy(user_data, iucv->src_name);
447                         high_nmcpy(user_data, iucv->dst_name);
448                         ASCEBC(user_data, sizeof(user_data));
449                         pr_iucv->path_sever(path, user_data);
450                 } else
451                         pr_iucv->path_sever(path, NULL);
452                 iucv_path_free(path);
453         }
454 }
455
456 /* Close an IUCV socket */
457 static void iucv_sock_close(struct sock *sk)
458 {
459         struct iucv_sock *iucv = iucv_sk(sk);
460         unsigned long timeo;
461         int err = 0;
462         int blen;
463         struct sk_buff *skb;
464
465         lock_sock(sk);
466
467         switch (sk->sk_state) {
468         case IUCV_LISTEN:
469                 iucv_sock_cleanup_listen(sk);
470                 break;
471
472         case IUCV_CONNECTED:
473                 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
474                         /* send fin */
475                         blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
476                         skb = sock_alloc_send_skb(sk, blen, 1, &err);
477                         if (skb) {
478                                 skb_reserve(skb, blen);
479                                 err = afiucv_hs_send(NULL, sk, skb,
480                                                      AF_IUCV_FLAG_FIN);
481                         }
482                         sk->sk_state = IUCV_DISCONN;
483                         sk->sk_state_change(sk);
484                 }
485         case IUCV_DISCONN:   /* fall through */
486                 sk->sk_state = IUCV_CLOSING;
487                 sk->sk_state_change(sk);
488
489                 if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
490                         if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
491                                 timeo = sk->sk_lingertime;
492                         else
493                                 timeo = IUCV_DISCONN_TIMEOUT;
494                         iucv_sock_wait(sk,
495                                         iucv_sock_in_state(sk, IUCV_CLOSED, 0),
496                                         timeo);
497                 }
498
499         case IUCV_CLOSING:   /* fall through */
500                 sk->sk_state = IUCV_CLOSED;
501                 sk->sk_state_change(sk);
502
503                 sk->sk_err = ECONNRESET;
504                 sk->sk_state_change(sk);
505
506                 skb_queue_purge(&iucv->send_skb_q);
507                 skb_queue_purge(&iucv->backlog_skb_q);
508
509         default:   /* fall through */
510                 iucv_sever_path(sk, 1);
511         }
512
513         if (iucv->hs_dev) {
514                 dev_put(iucv->hs_dev);
515                 iucv->hs_dev = NULL;
516                 sk->sk_bound_dev_if = 0;
517         }
518
519         /* mark socket for deletion by iucv_sock_kill() */
520         sock_set_flag(sk, SOCK_ZAPPED);
521
522         release_sock(sk);
523 }
524
525 static void iucv_sock_init(struct sock *sk, struct sock *parent)
526 {
527         if (parent)
528                 sk->sk_type = parent->sk_type;
529 }
530
531 static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
532 {
533         struct sock *sk;
534         struct iucv_sock *iucv;
535
536         sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
537         if (!sk)
538                 return NULL;
539         iucv = iucv_sk(sk);
540
541         sock_init_data(sock, sk);
542         INIT_LIST_HEAD(&iucv->accept_q);
543         spin_lock_init(&iucv->accept_q_lock);
544         skb_queue_head_init(&iucv->send_skb_q);
545         INIT_LIST_HEAD(&iucv->message_q.list);
546         spin_lock_init(&iucv->message_q.lock);
547         skb_queue_head_init(&iucv->backlog_skb_q);
548         iucv->send_tag = 0;
549         atomic_set(&iucv->pendings, 0);
550         iucv->flags = 0;
551         iucv->msglimit = 0;
552         atomic_set(&iucv->msg_sent, 0);
553         atomic_set(&iucv->msg_recv, 0);
554         iucv->path = NULL;
555         iucv->sk_txnotify = afiucv_hs_callback_txnotify;
556         memset(&iucv->src_user_id, 0, 32);
557         if (pr_iucv)
558                 iucv->transport = AF_IUCV_TRANS_IUCV;
559         else
560                 iucv->transport = AF_IUCV_TRANS_HIPER;
561
562         sk->sk_destruct = iucv_sock_destruct;
563         sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
564         sk->sk_allocation = GFP_DMA;
565
566         sock_reset_flag(sk, SOCK_ZAPPED);
567
568         sk->sk_protocol = proto;
569         sk->sk_state    = IUCV_OPEN;
570
571         iucv_sock_link(&iucv_sk_list, sk);
572         return sk;
573 }
574
575 /* Create an IUCV socket */
576 static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
577                             int kern)
578 {
579         struct sock *sk;
580
581         if (protocol && protocol != PF_IUCV)
582                 return -EPROTONOSUPPORT;
583
584         sock->state = SS_UNCONNECTED;
585
586         switch (sock->type) {
587         case SOCK_STREAM:
588                 sock->ops = &iucv_sock_ops;
589                 break;
590         case SOCK_SEQPACKET:
591                 /* currently, proto ops can handle both sk types */
592                 sock->ops = &iucv_sock_ops;
593                 break;
594         default:
595                 return -ESOCKTNOSUPPORT;
596         }
597
598         sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
599         if (!sk)
600                 return -ENOMEM;
601
602         iucv_sock_init(sk, NULL);
603
604         return 0;
605 }
606
607 void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
608 {
609         write_lock_bh(&l->lock);
610         sk_add_node(sk, &l->head);
611         write_unlock_bh(&l->lock);
612 }
613
614 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
615 {
616         write_lock_bh(&l->lock);
617         sk_del_node_init(sk);
618         write_unlock_bh(&l->lock);
619 }
620
621 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
622 {
623         unsigned long flags;
624         struct iucv_sock *par = iucv_sk(parent);
625
626         sock_hold(sk);
627         spin_lock_irqsave(&par->accept_q_lock, flags);
628         list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
629         spin_unlock_irqrestore(&par->accept_q_lock, flags);
630         iucv_sk(sk)->parent = parent;
631         sk_acceptq_added(parent);
632 }
633
634 void iucv_accept_unlink(struct sock *sk)
635 {
636         unsigned long flags;
637         struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
638
639         spin_lock_irqsave(&par->accept_q_lock, flags);
640         list_del_init(&iucv_sk(sk)->accept_q);
641         spin_unlock_irqrestore(&par->accept_q_lock, flags);
642         sk_acceptq_removed(iucv_sk(sk)->parent);
643         iucv_sk(sk)->parent = NULL;
644         sock_put(sk);
645 }
646
647 struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
648 {
649         struct iucv_sock *isk, *n;
650         struct sock *sk;
651
652         list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
653                 sk = (struct sock *) isk;
654                 lock_sock(sk);
655
656                 if (sk->sk_state == IUCV_CLOSED) {
657                         iucv_accept_unlink(sk);
658                         release_sock(sk);
659                         continue;
660                 }
661
662                 if (sk->sk_state == IUCV_CONNECTED ||
663                     sk->sk_state == IUCV_DISCONN ||
664                     !newsock) {
665                         iucv_accept_unlink(sk);
666                         if (newsock)
667                                 sock_graft(sk, newsock);
668
669                         release_sock(sk);
670                         return sk;
671                 }
672
673                 release_sock(sk);
674         }
675         return NULL;
676 }
677
678 /* Bind an unbound socket */
679 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
680                           int addr_len)
681 {
682         struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
683         struct sock *sk = sock->sk;
684         struct iucv_sock *iucv;
685         int err = 0;
686         struct net_device *dev;
687         char uid[9];
688
689         /* Verify the input sockaddr */
690         if (!addr || addr->sa_family != AF_IUCV)
691                 return -EINVAL;
692
693         lock_sock(sk);
694         if (sk->sk_state != IUCV_OPEN) {
695                 err = -EBADFD;
696                 goto done;
697         }
698
699         write_lock_bh(&iucv_sk_list.lock);
700
701         iucv = iucv_sk(sk);
702         if (__iucv_get_sock_by_name(sa->siucv_name)) {
703                 err = -EADDRINUSE;
704                 goto done_unlock;
705         }
706         if (iucv->path)
707                 goto done_unlock;
708
709         /* Bind the socket */
710         if (pr_iucv)
711                 if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
712                         goto vm_bind; /* VM IUCV transport */
713
714         /* try hiper transport */
715         memcpy(uid, sa->siucv_user_id, sizeof(uid));
716         ASCEBC(uid, 8);
717         rcu_read_lock();
718         for_each_netdev_rcu(&init_net, dev) {
719                 if (!memcmp(dev->perm_addr, uid, 8)) {
720                         memcpy(iucv->src_name, sa->siucv_name, 8);
721                         memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
722                         sk->sk_bound_dev_if = dev->ifindex;
723                         iucv->hs_dev = dev;
724                         dev_hold(dev);
725                         sk->sk_state = IUCV_BOUND;
726                         iucv->transport = AF_IUCV_TRANS_HIPER;
727                         if (!iucv->msglimit)
728                                 iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
729                         rcu_read_unlock();
730                         goto done_unlock;
731                 }
732         }
733         rcu_read_unlock();
734 vm_bind:
735         if (pr_iucv) {
736                 /* use local userid for backward compat */
737                 memcpy(iucv->src_name, sa->siucv_name, 8);
738                 memcpy(iucv->src_user_id, iucv_userid, 8);
739                 sk->sk_state = IUCV_BOUND;
740                 iucv->transport = AF_IUCV_TRANS_IUCV;
741                 if (!iucv->msglimit)
742                         iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
743                 goto done_unlock;
744         }
745         /* found no dev to bind */
746         err = -ENODEV;
747 done_unlock:
748         /* Release the socket list lock */
749         write_unlock_bh(&iucv_sk_list.lock);
750 done:
751         release_sock(sk);
752         return err;
753 }
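/*
 * Userspace sketch (editor addition, not part of af_iucv.c): binding an
 * AF_IUCV socket.  The struct layout below mirrors the fields used by
 * iucv_sock_bind()/iucv_sock_getname(); the local struct definition, the
 * AF_IUCV fallback value and the ids "LNXGUEST"/"MYAPP   " are assumptions
 * for illustration only.
 */
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IUCV
#define AF_IUCV 32
#endif

struct sockaddr_iucv_example {
	sa_family_t    siucv_family;
	unsigned short siucv_port;        /* reserved */
	unsigned int   siucv_addr;        /* reserved */
	char           siucv_nodeid[8];   /* reserved, blank padded */
	char           siucv_user_id[8];  /* z/VM guest user id, blank padded */
	char           siucv_name[8];     /* application name, blank padded */
};

static int example_iucv_bind(void)
{
	struct sockaddr_iucv_example sa;
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.siucv_family = AF_IUCV;
	/* user id of the local VM guest or of a HiperSockets device */
	memcpy(sa.siucv_user_id, "LNXGUEST", 8);
	memcpy(sa.siucv_name,    "MYAPP   ", 8);  /* 8 chars, blank padded */
	if (bind(fd, (struct sockaddr *) &sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}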
754
755 /* Automatically bind an unbound socket */
756 static int iucv_sock_autobind(struct sock *sk)
757 {
758         struct iucv_sock *iucv = iucv_sk(sk);
759         char name[12];
760         int err = 0;
761
762         if (unlikely(!pr_iucv))
763                 return -EPROTO;
764
765         memcpy(iucv->src_user_id, iucv_userid, 8);
766
767         write_lock_bh(&iucv_sk_list.lock);
768
769         sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
770         while (__iucv_get_sock_by_name(name)) {
771                 sprintf(name, "%08x",
772                         atomic_inc_return(&iucv_sk_list.autobind_name));
773         }
774
775         write_unlock_bh(&iucv_sk_list.lock);
776
777         memcpy(&iucv->src_name, name, 8);
778
779         if (!iucv->msglimit)
780                 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
781
782         return err;
783 }
784
785 static int afiucv_hs_connect(struct socket *sock)
786 {
787         struct sock *sk = sock->sk;
788         struct sk_buff *skb;
789         int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
790         int err = 0;
791
792         /* send syn */
793         skb = sock_alloc_send_skb(sk, blen, 1, &err);
794         if (!skb) {
795                 err = -ENOMEM;
796                 goto done;
797         }
798         skb->dev = NULL;
799         skb_reserve(skb, blen);
800         err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
801 done:
802         return err;
803 }
804
805 static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
806 {
807         struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
808         struct sock *sk = sock->sk;
809         struct iucv_sock *iucv = iucv_sk(sk);
810         unsigned char user_data[16];
811         int err;
812
813         high_nmcpy(user_data, sa->siucv_name);
814         low_nmcpy(user_data, iucv->src_name);
815         ASCEBC(user_data, sizeof(user_data));
816
817         /* Create path. */
818         iucv->path = iucv_path_alloc(iucv->msglimit,
819                                      IUCV_IPRMDATA, GFP_KERNEL);
820         if (!iucv->path) {
821                 err = -ENOMEM;
822                 goto done;
823         }
824         err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
825                                     sa->siucv_user_id, NULL, user_data,
826                                     sk);
827         if (err) {
828                 iucv_path_free(iucv->path);
829                 iucv->path = NULL;
830                 switch (err) {
831                 case 0x0b:      /* Target communicator is not logged on */
832                         err = -ENETUNREACH;
833                         break;
834                 case 0x0d:      /* Max connections for this guest exceeded */
835                 case 0x0e:      /* Max connections for target guest exceeded */
836                         err = -EAGAIN;
837                         break;
838                 case 0x0f:      /* Missing IUCV authorization */
839                         err = -EACCES;
840                         break;
841                 default:
842                         err = -ECONNREFUSED;
843                         break;
844                 }
845         }
846 done:
847         return err;
848 }
849
850 /* Connect an unconnected socket */
851 static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
852                              int alen, int flags)
853 {
854         struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
855         struct sock *sk = sock->sk;
856         struct iucv_sock *iucv = iucv_sk(sk);
857         int err;
858
859         if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
860                 return -EINVAL;
861
862         if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
863                 return -EBADFD;
864
865         if (sk->sk_state == IUCV_OPEN &&
866             iucv->transport == AF_IUCV_TRANS_HIPER)
867                 return -EBADFD; /* explicit bind required */
868
869         if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
870                 return -EINVAL;
871
872         if (sk->sk_state == IUCV_OPEN) {
873                 err = iucv_sock_autobind(sk);
874                 if (unlikely(err))
875                         return err;
876         }
877
878         lock_sock(sk);
879
880         /* Set the destination information */
881         memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
882         memcpy(iucv->dst_name, sa->siucv_name, 8);
883
884         if (iucv->transport == AF_IUCV_TRANS_HIPER)
885                 err = afiucv_hs_connect(sock);
886         else
887                 err = afiucv_path_connect(sock, addr);
888         if (err)
889                 goto done;
890
891         if (sk->sk_state != IUCV_CONNECTED)
892                 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
893                                                             IUCV_DISCONN),
894                                      sock_sndtimeo(sk, flags & O_NONBLOCK));
895
896         if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
897                 err = -ECONNREFUSED;
898
899         if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
900                 iucv_sever_path(sk, 0);
901
902 done:
903         release_sock(sk);
904         return err;
905 }
906
907 /* Move a socket into listening state. */
908 static int iucv_sock_listen(struct socket *sock, int backlog)
909 {
910         struct sock *sk = sock->sk;
911         int err;
912
913         lock_sock(sk);
914
915         err = -EINVAL;
916         if (sk->sk_state != IUCV_BOUND)
917                 goto done;
918
919         if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
920                 goto done;
921
922         sk->sk_max_ack_backlog = backlog;
923         sk->sk_ack_backlog = 0;
924         sk->sk_state = IUCV_LISTEN;
925         err = 0;
926
927 done:
928         release_sock(sk);
929         return err;
930 }
931
932 /* Accept a pending connection */
933 static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
934                             int flags)
935 {
936         DECLARE_WAITQUEUE(wait, current);
937         struct sock *sk = sock->sk, *nsk;
938         long timeo;
939         int err = 0;
940
941         lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
942
943         if (sk->sk_state != IUCV_LISTEN) {
944                 err = -EBADFD;
945                 goto done;
946         }
947
948         timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
949
950         /* Wait for an incoming connection */
951         add_wait_queue_exclusive(sk_sleep(sk), &wait);
952         while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
953                 set_current_state(TASK_INTERRUPTIBLE);
954                 if (!timeo) {
955                         err = -EAGAIN;
956                         break;
957                 }
958
959                 release_sock(sk);
960                 timeo = schedule_timeout(timeo);
961                 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
962
963                 if (sk->sk_state != IUCV_LISTEN) {
964                         err = -EBADFD;
965                         break;
966                 }
967
968                 if (signal_pending(current)) {
969                         err = sock_intr_errno(timeo);
970                         break;
971                 }
972         }
973
974         set_current_state(TASK_RUNNING);
975         remove_wait_queue(sk_sleep(sk), &wait);
976
977         if (err)
978                 goto done;
979
980         newsock->state = SS_CONNECTED;
981
982 done:
983         release_sock(sk);
984         return err;
985 }
986
987 static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
988                              int *len, int peer)
989 {
990         struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
991         struct sock *sk = sock->sk;
992         struct iucv_sock *iucv = iucv_sk(sk);
993
994         addr->sa_family = AF_IUCV;
995         *len = sizeof(struct sockaddr_iucv);
996
997         if (peer) {
998                 memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
999                 memcpy(siucv->siucv_name, iucv->dst_name, 8);
1000         } else {
1001                 memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
1002                 memcpy(siucv->siucv_name, iucv->src_name, 8);
1003         }
1004         memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
1005         memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
1006         memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
1007
1008         return 0;
1009 }
1010
1011 /**
1012  * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
1013  * @path:       IUCV path
1014  * @msg:        Pointer to a struct iucv_message
1015  * @skb:        The socket data to send, skb->len MUST BE <= 7
1016  *
1017  * Send the socket data in the parameter list in the iucv message
1018  * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
1019  * list and the socket data len at index 7 (last byte).
1020  * See also iucv_msg_length().
1021  *
1022  * Returns the error code from the iucv_message_send() call.
1023  */
1024 static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
1025                           struct sk_buff *skb)
1026 {
1027         u8 prmdata[8];
1028
1029         memcpy(prmdata, (void *) skb->data, skb->len);
1030         prmdata[7] = 0xff - (u8) skb->len;
1031         return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
1032                                  (void *) prmdata, 8);
1033 }
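/*
 * Worked example (editor addition): for skb->len == 3 with payload bytes
 * d0 d1 d2, the call above sends the 8-byte parameter list with
 *      prmdata[0..2] = d0 d1 d2  and  prmdata[7] = 0xff - 3 = 0xfc
 * (bytes 3..6 are ignored by the receiver, since iucv_msg_length()
 * recovers a length of 3).
 */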
1034
1035 static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1036                              struct msghdr *msg, size_t len)
1037 {
1038         struct sock *sk = sock->sk;
1039         struct iucv_sock *iucv = iucv_sk(sk);
1040         struct sk_buff *skb;
1041         struct iucv_message txmsg;
1042         struct cmsghdr *cmsg;
1043         int cmsg_done;
1044         long timeo;
1045         char user_id[9];
1046         char appl_id[9];
1047         int err;
1048         int noblock = msg->msg_flags & MSG_DONTWAIT;
1049
1050         err = sock_error(sk);
1051         if (err)
1052                 return err;
1053
1054         if (msg->msg_flags & MSG_OOB)
1055                 return -EOPNOTSUPP;
1056
1057         /* SOCK_SEQPACKET: we do not support segmented records */
1058         if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
1059                 return -EOPNOTSUPP;
1060
1061         lock_sock(sk);
1062
1063         if (sk->sk_shutdown & SEND_SHUTDOWN) {
1064                 err = -EPIPE;
1065                 goto out;
1066         }
1067
1068         /* Return if the socket is not in connected state */
1069         if (sk->sk_state != IUCV_CONNECTED) {
1070                 err = -ENOTCONN;
1071                 goto out;
1072         }
1073
1074         /* initialize defaults */
1075         cmsg_done   = 0;        /* check for duplicate headers */
1076         txmsg.class = 0;
1077
1078         /* iterate over control messages */
1079         for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
1080                 cmsg = CMSG_NXTHDR(msg, cmsg)) {
1081
1082                 if (!CMSG_OK(msg, cmsg)) {
1083                         err = -EINVAL;
1084                         goto out;
1085                 }
1086
1087                 if (cmsg->cmsg_level != SOL_IUCV)
1088                         continue;
1089
1090                 if (cmsg->cmsg_type & cmsg_done) {
1091                         err = -EINVAL;
1092                         goto out;
1093                 }
1094                 cmsg_done |= cmsg->cmsg_type;
1095
1096                 switch (cmsg->cmsg_type) {
1097                 case SCM_IUCV_TRGCLS:
1098                         if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
1099                                 err = -EINVAL;
1100                                 goto out;
1101                         }
1102
1103                         /* set iucv message target class */
1104                         memcpy(&txmsg.class,
1105                                 (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
1106
1107                         break;
1108
1109                 default:
1110                         err = -EINVAL;
1111                         goto out;
1112                         break;
1113                 }
1114         }
1115
1116         /* allocate one skb for each iucv message:
1117          * this is fine for SOCK_SEQPACKET (unless we want to support
1118          * segmented records using the MSG_EOR flag), but
1119          * for SOCK_STREAM we might want to improve it in future */
1120         if (iucv->transport == AF_IUCV_TRANS_HIPER)
1121                 skb = sock_alloc_send_skb(sk,
1122                         len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
1123                         noblock, &err);
1124         else
1125                 skb = sock_alloc_send_skb(sk, len, noblock, &err);
1126         if (!skb) {
1127                 err = -ENOMEM;
1128                 goto out;
1129         }
1130         if (iucv->transport == AF_IUCV_TRANS_HIPER)
1131                 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
1132         if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
1133                 err = -EFAULT;
1134                 goto fail;
1135         }
1136
1137         /* wait until the number of outstanding iucv messages is below the msglimit */
1138         timeo = sock_sndtimeo(sk, noblock);
1139         err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
1140         if (err)
1141                 goto fail;
1142
1143         /* return -ECONNRESET if the socket is no longer connected */
1144         if (sk->sk_state != IUCV_CONNECTED) {
1145                 err = -ECONNRESET;
1146                 goto fail;
1147         }
1148
1149         /* increment and save iucv message tag for msg_completion cbk */
1150         txmsg.tag = iucv->send_tag++;
1151         memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
1152
1153         if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1154                 atomic_inc(&iucv->msg_sent);
1155                 err = afiucv_hs_send(&txmsg, sk, skb, 0);
1156                 if (err) {
1157                         atomic_dec(&iucv->msg_sent);
1158                         goto fail;
1159                 }
1160                 goto release;
1161         }
1162         skb_queue_tail(&iucv->send_skb_q, skb);
1163
1164         if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
1165               && skb->len <= 7) {
1166                 err = iucv_send_iprm(iucv->path, &txmsg, skb);
1167
1168                 /* on success: there is no message_complete callback
1169                  * for an IPRMDATA msg; remove skb from send queue */
1170                 if (err == 0) {
1171                         skb_unlink(skb, &iucv->send_skb_q);
1172                         kfree_skb(skb);
1173                 }
1174
1175                 /* this error should never happen since the
1176                  * IUCV_IPRMDATA path flag is set... sever path */
1177                 if (err == 0x15) {
1178                         pr_iucv->path_sever(iucv->path, NULL);
1179                         skb_unlink(skb, &iucv->send_skb_q);
1180                         err = -EPIPE;
1181                         goto fail;
1182                 }
1183         } else
1184                 err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1185                                         (void *) skb->data, skb->len);
1186         if (err) {
1187                 if (err == 3) {
1188                         user_id[8] = 0;
1189                         memcpy(user_id, iucv->dst_user_id, 8);
1190                         appl_id[8] = 0;
1191                         memcpy(appl_id, iucv->dst_name, 8);
1192                         pr_err("Application %s on z/VM guest %s"
1193                                 " exceeds message limit\n",
1194                                 appl_id, user_id);
1195                         err = -EAGAIN;
1196                 } else
1197                         err = -EPIPE;
1198                 skb_unlink(skb, &iucv->send_skb_q);
1199                 goto fail;
1200         }
1201
1202 release:
1203         release_sock(sk);
1204         return len;
1205
1206 fail:
1207         kfree_skb(skb);
1208 out:
1209         release_sock(sk);
1210         return err;
1211 }
1212
1213 /* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
1214  *
1215  * Locking: must be called with message_q.lock held
1216  */
1217 static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
1218 {
1219         int dataleft, size, copied = 0;
1220         struct sk_buff *nskb;
1221
1222         dataleft = len;
1223         while (dataleft) {
1224                 if (dataleft >= sk->sk_rcvbuf / 4)
1225                         size = sk->sk_rcvbuf / 4;
1226                 else
1227                         size = dataleft;
1228
1229                 nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
1230                 if (!nskb)
1231                         return -ENOMEM;
1232
1233                 /* copy target class to control buffer of new skb */
1234                 memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
1235
1236                 /* copy data fragment */
1237                 memcpy(nskb->data, skb->data + copied, size);
1238                 copied += size;
1239                 dataleft -= size;
1240
1241                 skb_reset_transport_header(nskb);
1242                 skb_reset_network_header(nskb);
1243                 nskb->len = size;
1244
1245                 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
1246         }
1247
1248         return 0;
1249 }
1250
1251 /* iucv_process_message() - Receive a single outstanding IUCV message
1252  *
1253  * Locking: must be called with message_q.lock held
1254  */
1255 static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1256                                  struct iucv_path *path,
1257                                  struct iucv_message *msg)
1258 {
1259         int rc;
1260         unsigned int len;
1261
1262         len = iucv_msg_length(msg);
1263
1264         /* store msg target class in the second 4 bytes of skb ctrl buffer */
1265         /* Note: the first 4 bytes are reserved for msg tag */
1266         memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
1267
1268         /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
1269         if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
1270                 if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
1271                         skb->data = NULL;
1272                         skb->len = 0;
1273                 }
1274         } else {
1275                 rc = pr_iucv->message_receive(path, msg,
1276                                               msg->flags & IUCV_IPRMDATA,
1277                                               skb->data, len, NULL);
1278                 if (rc) {
1279                         kfree_skb(skb);
1280                         return;
1281                 }
1282                 /* we need to fragment iucv messages for SOCK_STREAM only;
1283                  * for SOCK_SEQPACKET, it is only relevant if we support
1284                  * record segmentation using MSG_EOR (see also recvmsg()) */
1285                 if (sk->sk_type == SOCK_STREAM &&
1286                     skb->truesize >= sk->sk_rcvbuf / 4) {
1287                         rc = iucv_fragment_skb(sk, skb, len);
1288                         kfree_skb(skb);
1289                         skb = NULL;
1290                         if (rc) {
1291                                 pr_iucv->path_sever(path, NULL);
1292                                 return;
1293                         }
1294                         skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
1295                 } else {
1296                         skb_reset_transport_header(skb);
1297                         skb_reset_network_header(skb);
1298                         skb->len = len;
1299                 }
1300         }
1301
1302         if (sock_queue_rcv_skb(sk, skb))
1303                 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
1304 }
1305
1306 /* iucv_process_message_q() - Process outstanding IUCV messages
1307  *
1308  * Locking: must be called with message_q.lock held
1309  */
1310 static void iucv_process_message_q(struct sock *sk)
1311 {
1312         struct iucv_sock *iucv = iucv_sk(sk);
1313         struct sk_buff *skb;
1314         struct sock_msg_q *p, *n;
1315
1316         list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
1317                 skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
1318                 if (!skb)
1319                         break;
1320                 iucv_process_message(sk, skb, p->path, &p->msg);
1321                 list_del(&p->list);
1322                 kfree(p);
1323                 if (!skb_queue_empty(&iucv->backlog_skb_q))
1324                         break;
1325         }
1326 }
1327
1328 static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1329                              struct msghdr *msg, size_t len, int flags)
1330 {
1331         int noblock = flags & MSG_DONTWAIT;
1332         struct sock *sk = sock->sk;
1333         struct iucv_sock *iucv = iucv_sk(sk);
1334         unsigned int copied, rlen;
1335         struct sk_buff *skb, *rskb, *cskb, *sskb;
1336         int blen;
1337         int err = 0;
1338
1339         if ((sk->sk_state == IUCV_DISCONN) &&
1340             skb_queue_empty(&iucv->backlog_skb_q) &&
1341             skb_queue_empty(&sk->sk_receive_queue) &&
1342             list_empty(&iucv->message_q.list))
1343                 return 0;
1344
1345         if (flags & (MSG_OOB))
1346                 return -EOPNOTSUPP;
1347
1348         /* receive/dequeue next skb:
1349          * the function understands MSG_PEEK and, thus, does not dequeue skb */
1350         skb = skb_recv_datagram(sk, flags, noblock, &err);
1351         if (!skb) {
1352                 if (sk->sk_shutdown & RCV_SHUTDOWN)
1353                         return 0;
1354                 return err;
1355         }
1356
1357         rlen   = skb->len;              /* real length of skb */
1358         copied = min_t(unsigned int, rlen, len);
1359
1360         cskb = skb;
1361         if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
1362                 if (!(flags & MSG_PEEK))
1363                         skb_queue_head(&sk->sk_receive_queue, skb);
1364                 return -EFAULT;
1365         }
1366
1367         /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
1368         if (sk->sk_type == SOCK_SEQPACKET) {
1369                 if (copied < rlen)
1370                         msg->msg_flags |= MSG_TRUNC;
1371                 /* each iucv message contains a complete record */
1372                 msg->msg_flags |= MSG_EOR;
1373         }
1374
1375         /* create control message to store iucv msg target class:
1376          * get the trgcls from the control buffer of the skb due to
1377          * fragmentation of original iucv message. */
1378         err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
1379                         CB_TRGCLS_LEN, CB_TRGCLS(skb));
1380         if (err) {
1381                 if (!(flags & MSG_PEEK))
1382                         skb_queue_head(&sk->sk_receive_queue, skb);
1383                 return err;
1384         }
1385
1386         /* Mark read part of skb as used */
1387         if (!(flags & MSG_PEEK)) {
1388
1389                 /* SOCK_STREAM: re-queue skb if it contains unreceived data */
1390                 if (sk->sk_type == SOCK_STREAM) {
1391                         skb_pull(skb, copied);
1392                         if (skb->len) {
1393                                 skb_queue_head(&sk->sk_receive_queue, skb);
1394                                 goto done;
1395                         }
1396                 }
1397
1398                 kfree_skb(skb);
1399                 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1400                         atomic_inc(&iucv->msg_recv);
1401                         if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
1402                                 WARN_ON(1);
1403                                 iucv_sock_close(sk);
1404                                 return -EFAULT;
1405                         }
1406                 }
1407
1408                 /* Queue backlog skbs */
1409                 spin_lock_bh(&iucv->message_q.lock);
1410                 rskb = skb_dequeue(&iucv->backlog_skb_q);
1411                 while (rskb) {
1412                         if (sock_queue_rcv_skb(sk, rskb)) {
1413                                 skb_queue_head(&iucv->backlog_skb_q,
1414                                                 rskb);
1415                                 break;
1416                         } else {
1417                                 rskb = skb_dequeue(&iucv->backlog_skb_q);
1418                         }
1419                 }
1420                 if (skb_queue_empty(&iucv->backlog_skb_q)) {
1421                         if (!list_empty(&iucv->message_q.list))
1422                                 iucv_process_message_q(sk);
1423                         if (atomic_read(&iucv->msg_recv) >=
1424                                                         iucv->msglimit / 2) {
1425                                 /* send WIN to peer */
1426                                 blen = sizeof(struct af_iucv_trans_hdr) +
1427                                         ETH_HLEN;
1428                                 sskb = sock_alloc_send_skb(sk, blen, 1, &err);
1429                                 if (sskb) {
1430                                         skb_reserve(sskb, blen);
1431                                         err = afiucv_hs_send(NULL, sk, sskb,
1432                                                              AF_IUCV_FLAG_WIN);
1433                                 }
1434                                 if (err) {
1435                                         sk->sk_state = IUCV_DISCONN;
1436                                         sk->sk_state_change(sk);
1437                                 }
1438                         }
1439                 }
1440                 spin_unlock_bh(&iucv->message_q.lock);
1441         }
1442
1443 done:
1444         /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
1445         if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
1446                 copied = rlen;
1447
1448         return copied;
1449 }
1450
1451 static inline unsigned int iucv_accept_poll(struct sock *parent)
1452 {
1453         struct iucv_sock *isk, *n;
1454         struct sock *sk;
1455
1456         list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
1457                 sk = (struct sock *) isk;
1458
1459                 if (sk->sk_state == IUCV_CONNECTED)
1460                         return POLLIN | POLLRDNORM;
1461         }
1462
1463         return 0;
1464 }
1465
1466 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
1467                             poll_table *wait)
1468 {
1469         struct sock *sk = sock->sk;
1470         unsigned int mask = 0;
1471
1472         sock_poll_wait(file, sk_sleep(sk), wait);
1473
1474         if (sk->sk_state == IUCV_LISTEN)
1475                 return iucv_accept_poll(sk);
1476
1477         if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
1478                 mask |= POLLERR;
1479
1480         if (sk->sk_shutdown & RCV_SHUTDOWN)
1481                 mask |= POLLRDHUP;
1482
1483         if (sk->sk_shutdown == SHUTDOWN_MASK)
1484                 mask |= POLLHUP;
1485
1486         if (!skb_queue_empty(&sk->sk_receive_queue) ||
1487             (sk->sk_shutdown & RCV_SHUTDOWN))
1488                 mask |= POLLIN | POLLRDNORM;
1489
1490         if (sk->sk_state == IUCV_CLOSED)
1491                 mask |= POLLHUP;
1492
1493         if (sk->sk_state == IUCV_DISCONN)
1494                 mask |= POLLIN;
1495
1496         if (sock_writeable(sk) && iucv_below_msglim(sk))
1497                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1498         else
1499                 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1500
1501         return mask;
1502 }
1503
1504 static int iucv_sock_shutdown(struct socket *sock, int how)
1505 {
1506         struct sock *sk = sock->sk;
1507         struct iucv_sock *iucv = iucv_sk(sk);
1508         struct iucv_message txmsg;
1509         int err = 0;
1510
1511         how++;
1512
1513         if ((how & ~SHUTDOWN_MASK) || !how)
1514                 return -EINVAL;
1515
1516         lock_sock(sk);
1517         switch (sk->sk_state) {
1518         case IUCV_DISCONN:
1519         case IUCV_CLOSING:
1520         case IUCV_CLOSED:
1521                 err = -ENOTCONN;
1522                 goto fail;
1523
1524         default:
1525                 sk->sk_shutdown |= how;
1526                 break;
1527         }
1528
1529         if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1530                 txmsg.class = 0;
1531                 txmsg.tag = 0;
1532                 err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
1533                                         0, (void *) iprm_shutdown, 8);
1534                 if (err) {
1535                         switch (err) {
1536                         case 1:
1537                                 err = -ENOTCONN;
1538                                 break;
1539                         case 2:
1540                                 err = -ECONNRESET;
1541                                 break;
1542                         default:
1543                                 err = -ENOTCONN;
1544                                 break;
1545                         }
1546                 }
1547         }
1548
1549         if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1550                 err = pr_iucv->path_quiesce(iucv->path, NULL);
1551                 if (err)
1552                         err = -ENOTCONN;
1553
1554                 skb_queue_purge(&sk->sk_receive_queue);
1555         }
1556
1557         /* Wake up anyone sleeping in poll */
1558         sk->sk_state_change(sk);
1559
1560 fail:
1561         release_sock(sk);
1562         return err;
1563 }
1564
1565 static int iucv_sock_release(struct socket *sock)
1566 {
1567         struct sock *sk = sock->sk;
1568         int err = 0;
1569
1570         if (!sk)
1571                 return 0;
1572
1573         iucv_sock_close(sk);
1574
1575         sock_orphan(sk);
1576         iucv_sock_kill(sk);
1577         return err;
1578 }
1579
1580 /* getsockopt and setsockopt */
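/* SO_IPRMDATA_MSG enables transfer of small messages directly in the IUCV
 * parameter list (IPRMDATA).  SO_MSGLIMIT sets the requested message limit;
 * it is only accepted while the socket is still in IUCV_OPEN or IUCV_BOUND
 * state, i.e. before a connection exists, and must fit into 16 bits.
 */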
1581 static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1582                                 char __user *optval, unsigned int optlen)
1583 {
1584         struct sock *sk = sock->sk;
1585         struct iucv_sock *iucv = iucv_sk(sk);
1586         int val;
1587         int rc;
1588
1589         if (level != SOL_IUCV)
1590                 return -ENOPROTOOPT;
1591
1592         if (optlen < sizeof(int))
1593                 return -EINVAL;
1594
1595         if (get_user(val, (int __user *) optval))
1596                 return -EFAULT;
1597
1598         rc = 0;
1599
1600         lock_sock(sk);
1601         switch (optname) {
1602         case SO_IPRMDATA_MSG:
1603                 if (val)
1604                         iucv->flags |= IUCV_IPRMDATA;
1605                 else
1606                         iucv->flags &= ~IUCV_IPRMDATA;
1607                 break;
1608         case SO_MSGLIMIT:
1609                 switch (sk->sk_state) {
1610                 case IUCV_OPEN:
1611                 case IUCV_BOUND:
1612                         if (val < 1 || val > (u16)(~0))
1613                                 rc = -EINVAL;
1614                         else
1615                                 iucv->msglimit = val;
1616                         break;
1617                 default:
1618                         rc = -EINVAL;
1619                         break;
1620                 }
1621                 break;
1622         default:
1623                 rc = -ENOPROTOOPT;
1624                 break;
1625         }
1626         release_sock(sk);
1627
1628         return rc;
1629 }
1630
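/* SO_MSGLIMIT reports the limit negotiated for the path once connected, or
 * the configured default otherwise.  SO_MSGSIZE reports the maximum message
 * size a socket can receive: for the HiperSockets transport this is the
 * device MTU minus the AF_IUCV transport header and the Ethernet header,
 * otherwise 0x7fffffff; querying it on a socket still in IUCV_OPEN state
 * fails with -EBADFD.  From userspace this would be read roughly as follows
 * (illustrative sketch only, not part of this file):
 *
 *	int size;
 *	socklen_t len = sizeof(size);
 *	if (!getsockopt(fd, SOL_IUCV, SO_MSGSIZE, &size, &len))
 *		printf("max message size: %d\n", size);
 */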
1631 static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1632                                 char __user *optval, int __user *optlen)
1633 {
1634         struct sock *sk = sock->sk;
1635         struct iucv_sock *iucv = iucv_sk(sk);
1636         unsigned int val;
1637         int len;
1638
1639         if (level != SOL_IUCV)
1640                 return -ENOPROTOOPT;
1641
1642         if (get_user(len, optlen))
1643                 return -EFAULT;
1644
1645         if (len < 0)
1646                 return -EINVAL;
1647
1648         len = min_t(unsigned int, len, sizeof(int));
1649
1650         switch (optname) {
1651         case SO_IPRMDATA_MSG:
1652                 val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1653                 break;
1654         case SO_MSGLIMIT:
1655                 lock_sock(sk);
1656                 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1657                                            : iucv->msglimit;    /* default */
1658                 release_sock(sk);
1659                 break;
1660         case SO_MSGSIZE:
1661                 if (sk->sk_state == IUCV_OPEN)
1662                         return -EBADFD;
1663                 val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
1664                                 sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
1665                                 0x7fffffff;
1666                 break;
1667         default:
1668                 return -ENOPROTOOPT;
1669         }
1670
1671         if (put_user(len, optlen))
1672                 return -EFAULT;
1673         if (copy_to_user(optval, &val, len))
1674                 return -EFAULT;
1675
1676         return 0;
1677 }
1678
1679
1680 /* Callback wrappers - called from iucv base support */
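/* Connection request callback: look up a listening af_iucv socket whose
 * src_name matches the first 8 bytes of ipuser.  If one is found and its
 * accept backlog is not full, a child socket is allocated, the path is
 * accepted with the listener's message limit, and the child is queued on
 * the listener's accept queue; otherwise the path is severed.
 */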
1681 static int iucv_callback_connreq(struct iucv_path *path,
1682                                  u8 ipvmid[8], u8 ipuser[16])
1683 {
1684         unsigned char user_data[16];
1685         unsigned char nuser_data[16];
1686         unsigned char src_name[8];
1687         struct hlist_node *node;
1688         struct sock *sk, *nsk;
1689         struct iucv_sock *iucv, *niucv;
1690         int err;
1691
1692         memcpy(src_name, ipuser, 8);
1693         EBCASC(src_name, 8);
1694         /* Find out if this path belongs to af_iucv. */
1695         read_lock(&iucv_sk_list.lock);
1696         iucv = NULL;
1697         sk = NULL;
1698         sk_for_each(sk, node, &iucv_sk_list.head)
1699                 if (sk->sk_state == IUCV_LISTEN &&
1700                     !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
1701                         /*
1702                          * Found a listening socket with
1703                          * src_name == ipuser[0-7].
1704                          */
1705                         iucv = iucv_sk(sk);
1706                         break;
1707                 }
1708         read_unlock(&iucv_sk_list.lock);
1709         if (!iucv)
1710                 /* No socket found, not one of our paths. */
1711                 return -EINVAL;
1712
1713         bh_lock_sock(sk);
1714
1715         /* Check if parent socket is listening */
1716         low_nmcpy(user_data, iucv->src_name);
1717         high_nmcpy(user_data, iucv->dst_name);
1718         ASCEBC(user_data, sizeof(user_data));
1719         if (sk->sk_state != IUCV_LISTEN) {
1720                 err = pr_iucv->path_sever(path, user_data);
1721                 iucv_path_free(path);
1722                 goto fail;
1723         }
1724
1725         /* Check for backlog size */
1726         if (sk_acceptq_is_full(sk)) {
1727                 err = pr_iucv->path_sever(path, user_data);
1728                 iucv_path_free(path);
1729                 goto fail;
1730         }
1731
1732         /* Create the new socket */
1733         nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1734         if (!nsk) {
1735                 err = pr_iucv->path_sever(path, user_data);
1736                 iucv_path_free(path);
1737                 goto fail;
1738         }
1739
1740         niucv = iucv_sk(nsk);
1741         iucv_sock_init(nsk, sk);
1742
1743         /* Set the new iucv_sock */
1744         memcpy(niucv->dst_name, ipuser + 8, 8);
1745         EBCASC(niucv->dst_name, 8);
1746         memcpy(niucv->dst_user_id, ipvmid, 8);
1747         memcpy(niucv->src_name, iucv->src_name, 8);
1748         memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1749         niucv->path = path;
1750
1751         /* Call iucv_accept */
1752         high_nmcpy(nuser_data, ipuser + 8);
1753         memcpy(nuser_data + 8, niucv->src_name, 8);
1754         ASCEBC(nuser_data + 8, 8);
1755
1756         /* set message limit for path based on msglimit of accepting socket */
1757         niucv->msglimit = iucv->msglimit;
1758         path->msglim = iucv->msglimit;
1759         err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1760         if (err) {
1761                 iucv_sever_path(nsk, 1);
1762                 iucv_sock_kill(nsk);
1763                 goto fail;
1764         }
1765
1766         iucv_accept_enqueue(sk, nsk);
1767
1768         /* Wake up accept */
1769         nsk->sk_state = IUCV_CONNECTED;
1770         sk->sk_data_ready(sk, 1);
1771         err = 0;
1772 fail:
1773         bh_unlock_sock(sk);
1774         return 0;
1775 }
1776
1777 static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1778 {
1779         struct sock *sk = path->private;
1780
1781         sk->sk_state = IUCV_CONNECTED;
1782         sk->sk_state_change(sk);
1783 }
1784
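/* Message-pending callback: reject the message if the receive direction is
 * already shut down.  If there is no backlog and the receive buffer has
 * room, the message is fetched into an skb right away; otherwise it is
 * saved on message_q and processed later from iucv_sock_recvmsg().
 */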
1785 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1786 {
1787         struct sock *sk = path->private;
1788         struct iucv_sock *iucv = iucv_sk(sk);
1789         struct sk_buff *skb;
1790         struct sock_msg_q *save_msg;
1791         int len;
1792
1793         if (sk->sk_shutdown & RCV_SHUTDOWN) {
1794                 pr_iucv->message_reject(path, msg);
1795                 return;
1796         }
1797
1798         spin_lock(&iucv->message_q.lock);
1799
1800         if (!list_empty(&iucv->message_q.list) ||
1801             !skb_queue_empty(&iucv->backlog_skb_q))
1802                 goto save_message;
1803
1804         len = atomic_read(&sk->sk_rmem_alloc);
1805         len += SKB_TRUESIZE(iucv_msg_length(msg));
1806         if (len > sk->sk_rcvbuf)
1807                 goto save_message;
1808
1809         skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
1810         if (!skb)
1811                 goto save_message;
1812
1813         iucv_process_message(sk, skb, path, msg);
1814         goto out_unlock;
1815
1816 save_message:
1817         save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1818         if (!save_msg)
1819                 goto out_unlock;
1820         save_msg->path = path;
1821         save_msg->msg = *msg;
1822
1823         list_add_tail(&save_msg->list, &iucv->message_q.list);
1824
1825 out_unlock:
1826         spin_unlock(&iucv->message_q.lock);
1827 }
1828
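/* Message-complete callback: find the skb on send_skb_q whose stored IUCV
 * message tag matches the completed message, unlink and free it, and wake
 * up senders blocked on the message limit.  Once the send queue drains
 * while the socket is in IUCV_CLOSING, the close completes.
 */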
1829 static void iucv_callback_txdone(struct iucv_path *path,
1830                                  struct iucv_message *msg)
1831 {
1832         struct sock *sk = path->private;
1833         struct sk_buff *this = NULL;
1834         struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
1835         struct sk_buff *list_skb = list->next;
1836         unsigned long flags;
1837
1838         bh_lock_sock(sk);
1839         if (!skb_queue_empty(list)) {
1840                 spin_lock_irqsave(&list->lock, flags);
1841
1842                 while (list_skb != (struct sk_buff *)list) {
1843                         if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
1844                                 this = list_skb;
1845                                 break;
1846                         }
1847                         list_skb = list_skb->next;
1848                 }
1849                 if (this)
1850                         __skb_unlink(this, list);
1851
1852                 spin_unlock_irqrestore(&list->lock, flags);
1853
1854                 if (this) {
1855                         kfree_skb(this);
1856                         /* wake up any process waiting to send */
1857                         iucv_sock_wake_msglim(sk);
1858                 }
1859         }
1860
1861         if (sk->sk_state == IUCV_CLOSING) {
1862                 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1863                         sk->sk_state = IUCV_CLOSED;
1864                         sk->sk_state_change(sk);
1865                 }
1866         }
1867         bh_unlock_sock(sk);
1868
1869 }
1870
1871 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1872 {
1873         struct sock *sk = path->private;
1874
1875         if (sk->sk_state == IUCV_CLOSED)
1876                 return;
1877
1878         bh_lock_sock(sk);
1879         iucv_sever_path(sk, 1);
1880         sk->sk_state = IUCV_DISCONN;
1881
1882         sk->sk_state_change(sk);
1883         bh_unlock_sock(sk);
1884 }
1885
1886 /* called if the other communication side shuts down its RECV direction;
1887  * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1888  */
1889 static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1890 {
1891         struct sock *sk = path->private;
1892
1893         bh_lock_sock(sk);
1894         if (sk->sk_state != IUCV_CLOSED) {
1895                 sk->sk_shutdown |= SEND_SHUTDOWN;
1896                 sk->sk_state_change(sk);
1897         }
1898         bh_unlock_sock(sk);
1899 }
1900
1901 /***************** HiperSockets transport callbacks ********************/
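/* Turn a received HiperSockets frame into a reply frame: convert the name
 * fields back to EBCDIC, swap the source and destination user IDs and
 * application names in the transport header, and prepend a zeroed Ethernet
 * header so the skb can be transmitted back to the sender.
 */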
1902 static void afiucv_swap_src_dest(struct sk_buff *skb)
1903 {
1904         struct af_iucv_trans_hdr *trans_hdr =
1905                                 (struct af_iucv_trans_hdr *)skb->data;
1906         char tmpID[8];
1907         char tmpName[8];
1908
1909         ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1910         ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1911         ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1912         ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1913         memcpy(tmpID, trans_hdr->srcUserID, 8);
1914         memcpy(tmpName, trans_hdr->srcAppName, 8);
1915         memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1916         memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1917         memcpy(trans_hdr->destUserID, tmpID, 8);
1918         memcpy(trans_hdr->destAppName, tmpName, 8);
1919         skb_push(skb, ETH_HLEN);
1920         memset(skb->data, 0, ETH_HLEN);
1921 }
1922
1923 /**
1924  * afiucv_hs_callback_syn - react on received SYN
1925  **/
1926 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1927 {
1928         struct sock *nsk;
1929         struct iucv_sock *iucv, *niucv;
1930         struct af_iucv_trans_hdr *trans_hdr;
1931         int err;
1932
1933         iucv = iucv_sk(sk);
1934         trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1935         if (!iucv) {
1936                 /* no sock - connection refused */
1937                 afiucv_swap_src_dest(skb);
1938                 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1939                 err = dev_queue_xmit(skb);
1940                 goto out;
1941         }
1942
1943         nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1944         bh_lock_sock(sk);
1945         if ((sk->sk_state != IUCV_LISTEN) ||
1946             sk_acceptq_is_full(sk) ||
1947             !nsk) {
1948                 /* error on server socket - connection refused */
1949                 if (nsk)
1950                         sk_free(nsk);
1951                 afiucv_swap_src_dest(skb);
1952                 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1953                 err = dev_queue_xmit(skb);
1954                 bh_unlock_sock(sk);
1955                 goto out;
1956         }
1957
1958         niucv = iucv_sk(nsk);
1959         iucv_sock_init(nsk, sk);
1960         niucv->transport = AF_IUCV_TRANS_HIPER;
1961         niucv->msglimit = iucv->msglimit;
1962         if (!trans_hdr->window)
1963                 niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1964         else
1965                 niucv->msglimit_peer = trans_hdr->window;
1966         memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1967         memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1968         memcpy(niucv->src_name, iucv->src_name, 8);
1969         memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1970         nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1971         niucv->hs_dev = iucv->hs_dev;
1972         dev_hold(niucv->hs_dev);
1973         afiucv_swap_src_dest(skb);
1974         trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1975         trans_hdr->window = niucv->msglimit;
1976         /* if the receiver acks the xmit, the connection is established */
1977         err = dev_queue_xmit(skb);
1978         if (!err) {
1979                 iucv_accept_enqueue(sk, nsk);
1980                 nsk->sk_state = IUCV_CONNECTED;
1981                 sk->sk_data_ready(sk, 1);
1982         } else
1983                 iucv_sock_kill(nsk);
1984         bh_unlock_sock(sk);
1985
1986 out:
1987         return NET_RX_SUCCESS;
1988 }
1989
1990 /**
1991  * afiucv_hs_callback_synack() - react on received SYN-ACK
1992  **/
1993 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
1994 {
1995         struct iucv_sock *iucv = iucv_sk(sk);
1996         struct af_iucv_trans_hdr *trans_hdr =
1997                                         (struct af_iucv_trans_hdr *)skb->data;
1998
1999         if (!iucv)
2000                 goto out;
2001         if (sk->sk_state != IUCV_BOUND)
2002                 goto out;
2003         bh_lock_sock(sk);
2004         iucv->msglimit_peer = trans_hdr->window;
2005         sk->sk_state = IUCV_CONNECTED;
2006         sk->sk_state_change(sk);
2007         bh_unlock_sock(sk);
2008 out:
2009         kfree_skb(skb);
2010         return NET_RX_SUCCESS;
2011 }
2012
2013 /**
2014  * afiucv_hs_callback_synfin() - react on received SYN_FIN
2015  **/
2016 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2017 {
2018         struct iucv_sock *iucv = iucv_sk(sk);
2019
2020         if (!iucv)
2021                 goto out;
2022         if (sk->sk_state != IUCV_BOUND)
2023                 goto out;
2024         bh_lock_sock(sk);
2025         sk->sk_state = IUCV_DISCONN;
2026         sk->sk_state_change(sk);
2027         bh_unlock_sock(sk);
2028 out:
2029         kfree_skb(skb);
2030         return NET_RX_SUCCESS;
2031 }
2032
2033 /**
2034  * afiucv_hs_callback_fin() - react on received FIN
2035  **/
2036 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2037 {
2038         struct iucv_sock *iucv = iucv_sk(sk);
2039
2040         /* other end of connection closed */
2041         if (!iucv)
2042                 goto out;
2043         bh_lock_sock(sk);
2044         if (sk->sk_state == IUCV_CONNECTED) {
2045                 sk->sk_state = IUCV_DISCONN;
2046                 sk->sk_state_change(sk);
2047         }
2048         bh_unlock_sock(sk);
2049 out:
2050         kfree_skb(skb);
2051         return NET_RX_SUCCESS;
2052 }
2053
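/* Flow control on the HiperSockets transport: a peer that has consumed at
 * least half of its message limit sends an AF_IUCV_FLAG_WIN frame (see
 * iucv_sock_recvmsg() above).  On receipt, the window value carried in the
 * transport header is subtracted from msg_sent, which frees send credits
 * and wakes up senders blocked on the message limit.
 */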
2054 /**
2055  * afiucv_hs_callback_win() - react on received WIN
2056  **/
2057 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2058 {
2059         struct iucv_sock *iucv = iucv_sk(sk);
2060         struct af_iucv_trans_hdr *trans_hdr =
2061                                         (struct af_iucv_trans_hdr *)skb->data;
2062
2063         if (!iucv)
2064                 return NET_RX_SUCCESS;
2065
2066         if (sk->sk_state != IUCV_CONNECTED)
2067                 return NET_RX_SUCCESS;
2068
2069         atomic_sub(trans_hdr->window, &iucv->msg_sent);
2070         iucv_sock_wake_msglim(sk);
2071         return NET_RX_SUCCESS;
2072 }
2073
2074 /**
2075  * afiucv_hs_callback_rx() - react on received data
2076  **/
2077 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2078 {
2079         struct iucv_sock *iucv = iucv_sk(sk);
2080
2081         if (!iucv) {
2082                 kfree_skb(skb);
2083                 return NET_RX_SUCCESS;
2084         }
2085
2086         if (sk->sk_state != IUCV_CONNECTED) {
2087                 kfree_skb(skb);
2088                 return NET_RX_SUCCESS;
2089         }
2090
2091         /* drop frames without payload, then strip the transport header */
2092         if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
2093                 kfree_skb(skb);
2094                 return NET_RX_SUCCESS;
2095         }
2096         skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2097         skb_reset_transport_header(skb);
2098         skb_reset_network_header(skb);
2099         spin_lock(&iucv->message_q.lock);
2100         if (skb_queue_empty(&iucv->backlog_skb_q)) {
2101                 if (sock_queue_rcv_skb(sk, skb)) {
2102                         /* handle rcv queue full */
2103                         skb_queue_tail(&iucv->backlog_skb_q, skb);
2104                 }
2105         } else
2106                 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2107         spin_unlock(&iucv->message_q.lock);
2108         return NET_RX_SUCCESS;
2109 }
2110
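/* Frame dispatch for the HiperSockets transport: the receiving socket is
 * found by matching the destination (and, except for SYN frames, also the
 * source) application name and user ID from the transport header against
 * the sockets in iucv_sk_list.  The flags field then selects the handler:
 * SYN, SYN|ACK, SYN|FIN, FIN, WIN, or a plain data frame.
 */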
2111 /**
2112  * afiucv_hs_rcv() - base function for arriving data through HiperSockets
2113  *                   transport
2114  *                   called from netif RX softirq
2115  **/
2116 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2117         struct packet_type *pt, struct net_device *orig_dev)
2118 {
2119         struct hlist_node *node;
2120         struct sock *sk;
2121         struct iucv_sock *iucv;
2122         struct af_iucv_trans_hdr *trans_hdr;
2123         char nullstring[8];
2124         int err = 0;
2125
2126         skb_pull(skb, ETH_HLEN);
2127         trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
2128         EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2129         EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2130         EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2131         EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2132         memset(nullstring, 0, sizeof(nullstring));
2133         iucv = NULL;
2134         sk = NULL;
2135         read_lock(&iucv_sk_list.lock);
2136         sk_for_each(sk, node, &iucv_sk_list.head) {
2137                 if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2138                         if ((!memcmp(&iucv_sk(sk)->src_name,
2139                                      trans_hdr->destAppName, 8)) &&
2140                             (!memcmp(&iucv_sk(sk)->src_user_id,
2141                                      trans_hdr->destUserID, 8)) &&
2142                             (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2143                             (!memcmp(&iucv_sk(sk)->dst_user_id,
2144                                      nullstring, 8))) {
2145                                 iucv = iucv_sk(sk);
2146                                 break;
2147                         }
2148                 } else {
2149                         if ((!memcmp(&iucv_sk(sk)->src_name,
2150                                      trans_hdr->destAppName, 8)) &&
2151                             (!memcmp(&iucv_sk(sk)->src_user_id,
2152                                      trans_hdr->destUserID, 8)) &&
2153                             (!memcmp(&iucv_sk(sk)->dst_name,
2154                                      trans_hdr->srcAppName, 8)) &&
2155                             (!memcmp(&iucv_sk(sk)->dst_user_id,
2156                                      trans_hdr->srcUserID, 8))) {
2157                                 iucv = iucv_sk(sk);
2158                                 break;
2159                         }
2160                 }
2161         }
2162         read_unlock(&iucv_sk_list.lock);
2163         if (!iucv)
2164                 sk = NULL;
2165
2166         /* No matching socket was found.  Open question: how should such
2167          * frames be answered without a socket context?
2168          *  1) send a reply without a socket and skip send rc checking, or
2169          *  2) introduce a default socket to handle these cases.
2170          *
2171          * SYN -> send SYN|ACK in the good case, SYN|FIN in the bad case
2172          * data -> send FIN
2173          * SYN|ACK, SYN|FIN, FIN -> no action? */
2174
2175         switch (trans_hdr->flags) {
2176         case AF_IUCV_FLAG_SYN:
2177                 /* connect request */
2178                 err = afiucv_hs_callback_syn(sk, skb);
2179                 break;
2180         case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2181                 /* connect request confirmed */
2182                 err = afiucv_hs_callback_synack(sk, skb);
2183                 break;
2184         case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2185                 /* connect request refused */
2186                 err = afiucv_hs_callback_synfin(sk, skb);
2187                 break;
2188         case (AF_IUCV_FLAG_FIN):
2189                 /* close request */
2190                 err = afiucv_hs_callback_fin(sk, skb);
2191                 break;
2192         case (AF_IUCV_FLAG_WIN):
2193                 err = afiucv_hs_callback_win(sk, skb);
2194                 if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2195                         kfree_skb(skb);
2196                         break;
2197                 }
2198                 /* fall through */
2199         case 0:
2200                 /* plain data frame */
2201                 memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
2202                        CB_TRGCLS_LEN);
2203                 err = afiucv_hs_callback_rx(sk, skb);
2204                 break;
2205         default:
2206                 ;
2207         }
2208
2209         return err;
2210 }
2211
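/* Transmit notifications arrive per skb: the owning socket is looked up in
 * iucv_sk_list, and the corresponding entry on send_skb_q is matched via
 * its shared skb_shinfo().  Successful and delayed-successful sends free
 * the skb and wake blocked senders; unreachable and error outcomes move a
 * connected socket to IUCV_DISCONN.
 */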
2212 /**
2213  * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2214  *                                 transport
2215  **/
2216 static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2217                                         enum iucv_tx_notify n)
2218 {
2219         struct sock *isk = skb->sk;
2220         struct sock *sk = NULL;
2221         struct iucv_sock *iucv = NULL;
2222         struct sk_buff_head *list;
2223         struct sk_buff *list_skb;
2224         struct sk_buff *nskb;
2225         unsigned long flags;
2226         struct hlist_node *node;
2227
2228         read_lock_irqsave(&iucv_sk_list.lock, flags);
2229         sk_for_each(sk, node, &iucv_sk_list.head)
2230                 if (sk == isk) {
2231                         iucv = iucv_sk(sk);
2232                         break;
2233                 }
2234         read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2235
2236         if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2237                 return;
2238
2239         list = &iucv->send_skb_q;
2240         spin_lock_irqsave(&list->lock, flags);
2241         if (skb_queue_empty(list))
2242                 goto out_unlock;
2243         list_skb = list->next;
2244         nskb = list_skb->next;
2245         while (list_skb != (struct sk_buff *)list) {
2246                 if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2247                         switch (n) {
2248                         case TX_NOTIFY_OK:
2249                                 __skb_unlink(list_skb, list);
2250                                 kfree_skb(list_skb);
2251                                 iucv_sock_wake_msglim(sk);
2252                                 break;
2253                         case TX_NOTIFY_PENDING:
2254                                 atomic_inc(&iucv->pendings);
2255                                 break;
2256                         case TX_NOTIFY_DELAYED_OK:
2257                                 __skb_unlink(list_skb, list);
2258                                 atomic_dec(&iucv->pendings);
2259                                 if (atomic_read(&iucv->pendings) <= 0)
2260                                         iucv_sock_wake_msglim(sk);
2261                                 kfree_skb(list_skb);
2262                                 break;
2263                         case TX_NOTIFY_UNREACHABLE:
2264                         case TX_NOTIFY_DELAYED_UNREACHABLE:
2265                         case TX_NOTIFY_TPQFULL: /* not yet used */
2266                         case TX_NOTIFY_GENERALERROR:
2267                         case TX_NOTIFY_DELAYED_GENERALERROR:
2268                                 __skb_unlink(list_skb, list);
2269                                 kfree_skb(list_skb);
2270                                 if (sk->sk_state == IUCV_CONNECTED) {
2271                                         sk->sk_state = IUCV_DISCONN;
2272                                         sk->sk_state_change(sk);
2273                                 }
2274                                 break;
2275                         }
2276                         break;
2277                 }
2278                 list_skb = nskb;
2279                 nskb = nskb->next;
2280         }
2281 out_unlock:
2282         spin_unlock_irqrestore(&list->lock, flags);
2283
2284         if (sk->sk_state == IUCV_CLOSING) {
2285                 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2286                         sk->sk_state = IUCV_CLOSED;
2287                         sk->sk_state_change(sk);
2288                 }
2289         }
2290
2291 }
2292 static const struct proto_ops iucv_sock_ops = {
2293         .family         = PF_IUCV,
2294         .owner          = THIS_MODULE,
2295         .release        = iucv_sock_release,
2296         .bind           = iucv_sock_bind,
2297         .connect        = iucv_sock_connect,
2298         .listen         = iucv_sock_listen,
2299         .accept         = iucv_sock_accept,
2300         .getname        = iucv_sock_getname,
2301         .sendmsg        = iucv_sock_sendmsg,
2302         .recvmsg        = iucv_sock_recvmsg,
2303         .poll           = iucv_sock_poll,
2304         .ioctl          = sock_no_ioctl,
2305         .mmap           = sock_no_mmap,
2306         .socketpair     = sock_no_socketpair,
2307         .shutdown       = iucv_sock_shutdown,
2308         .setsockopt     = iucv_sock_setsockopt,
2309         .getsockopt     = iucv_sock_getsockopt,
2310 };
2311
2312 static const struct net_proto_family iucv_sock_family_ops = {
2313         .family = AF_IUCV,
2314         .owner  = THIS_MODULE,
2315         .create = iucv_sock_create,
2316 };
2317
2318 static struct packet_type iucv_packet_type = {
2319         .type = cpu_to_be16(ETH_P_AF_IUCV),
2320         .func = afiucv_hs_rcv,
2321 };
2322
2323 static int afiucv_iucv_init(void)
2324 {
2325         int err;
2326
2327         err = pr_iucv->iucv_register(&af_iucv_handler, 0);
2328         if (err)
2329                 goto out;
2330         /* establish dummy device */
2331         af_iucv_driver.bus = pr_iucv->bus;
2332         err = driver_register(&af_iucv_driver);
2333         if (err)
2334                 goto out_iucv;
2335         af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2336         if (!af_iucv_dev) {
2337                 err = -ENOMEM;
2338                 goto out_driver;
2339         }
2340         dev_set_name(af_iucv_dev, "af_iucv");
2341         af_iucv_dev->bus = pr_iucv->bus;
2342         af_iucv_dev->parent = pr_iucv->root;
2343         af_iucv_dev->release = (void (*)(struct device *))kfree;
2344         af_iucv_dev->driver = &af_iucv_driver;
2345         err = device_register(af_iucv_dev);
2346         if (err)
2347                 goto out_driver;
2348         return 0;
2349
2350 out_driver:
2351         driver_unregister(&af_iucv_driver);
2352 out_iucv:
2353         pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2354 out:
2355         return err;
2356 }
2357
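/* Module initialization: on a z/VM guest the local user ID is obtained via
 * CP "QUERY USERID" and the iucv base module is requested, enabling the
 * classic VM IUCV transport; otherwise only the HiperSockets transport is
 * available.  The protocol, the PF_IUCV socket family and the ETH_P_AF_IUCV
 * packet handler are then registered.
 */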
2358 static int __init afiucv_init(void)
2359 {
2360         int err;
2361
2362         if (MACHINE_IS_VM) {
2363                 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2364                 if (unlikely(err)) {
2365                         WARN_ON(err);
2366                         err = -EPROTONOSUPPORT;
2367                         goto out;
2368                 }
2369
2370                 pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2371                 if (!pr_iucv) {
2372                         printk(KERN_WARNING "iucv_if lookup failed\n");
2373                         memset(&iucv_userid, 0, sizeof(iucv_userid));
2374                 }
2375         } else {
2376                 memset(&iucv_userid, 0, sizeof(iucv_userid));
2377                 pr_iucv = NULL;
2378         }
2379
2380         err = proto_register(&iucv_proto, 0);
2381         if (err)
2382                 goto out;
2383         err = sock_register(&iucv_sock_family_ops);
2384         if (err)
2385                 goto out_proto;
2386
2387         if (pr_iucv) {
2388                 err = afiucv_iucv_init();
2389                 if (err)
2390                         goto out_sock;
2391         }
2392         dev_add_pack(&iucv_packet_type);
2393         return 0;
2394
2395 out_sock:
2396         sock_unregister(PF_IUCV);
2397 out_proto:
2398         proto_unregister(&iucv_proto);
2399 out:
2400         if (pr_iucv)
2401                 symbol_put(iucv_if);
2402         return err;
2403 }
2404
2405 static void __exit afiucv_exit(void)
2406 {
2407         if (pr_iucv) {
2408                 device_unregister(af_iucv_dev);
2409                 driver_unregister(&af_iucv_driver);
2410                 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2411                 symbol_put(iucv_if);
2412         }
2413         dev_remove_pack(&iucv_packet_type);
2414         sock_unregister(PF_IUCV);
2415         proto_unregister(&iucv_proto);
2416 }
2417
2418 module_init(afiucv_init);
2419 module_exit(afiucv_exit);
2420
2421 MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
2422 MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
2423 MODULE_VERSION(VERSION);
2424 MODULE_LICENSE("GPL");
2425 MODULE_ALIAS_NETPROTO(PF_IUCV);
2426