/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 * PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

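/* A hedged note on the encoding (derived from iucv_msg_length() below):
 * the last PRMDATA byte carries the socket data length as (0xff - len).
 * For iprm_shutdown, PRMDATA[7] = 0x01 decodes to 0xff - 0x01 = 0xfe,
 * i.e. a "length" far above the 7-byte maximum for real socket data, so
 * receivers can recognize the 8-byte pattern as a special notification
 * rather than payload (see iucv_process_message()).
 */
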
#define TRGCLS_SIZE	(sizeof(((struct iucv_message *) 0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)

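/* Layout of skb->cb as used by AF_IUCV (illustrative sketch only):
 *
 *   offset 0             4                 8 ...
 *   +-------------------+-----------------+
 *   | iucv message tag  | target class    |  remaining cb space unused
 *   |  (CB_TAG, 4 B)    | (CB_TRGCLS, 4B) |
 *   +-------------------+-----------------+
 *
 * CB_TAG(skb) is written on send (see iucv_sock_sendmsg()) and matched in
 * iucv_callback_txdone(); CB_TRGCLS(skb) is filled on receive and exported
 * to user space as a SCM_IUCV_TRGCLS control message.
 */
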
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

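/* Typical use of the wait helpers (illustrative sketch): callers sleep
 * with the socket lock held on a state predicate, e.g.
 *
 *	timeo = sock_sndtimeo(sk, noblock);
 *	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
 *
 * __iucv_sock_wait() re-checks the condition after every wakeup and drops
 * and reacquires the socket lock around schedule_timeout(), so callbacks
 * running in softirq context can change the condition while the task
 * sleeps.
 */
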
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * Socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

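/* Worked example: an IPRM message carrying 5 bytes of socket data has
 * PRMDATA[7] = 0xff - 5 = 0xfa, so iucv_msg_length() returns 5. A value
 * above 7 (e.g. the iprm_shutdown pattern, 0xff - 0x01 = 0xfe) caps the
 * result at 8, which is sizeof(msg->rmmsg) and therefore large enough to
 * copy the whole parameter list for inspection by the receiver.
 */
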
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

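/* Flow control differs per transport: for AF_IUCV_TRANS_IUCV the limit is
 * the number of skbs still waiting for a message_complete callback versus
 * the negotiated path->msglim; for HiperSockets it is the peer's advertised
 * window (msglimit_peer) minus confirmed messages, and any frame still in
 * TX_NOTIFY_PENDING state (iucv->pendings > 0) blocks further sends until
 * the notification is resolved (see afiucv_hs_callback_txnotify()).
 */
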
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
					sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = ETH_P_AF_IUCV;
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);
}

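/* On-the-wire layout built above (sketch, not a formal spec):
 *
 *   +-----------------+--------------------------+------------------+
 *   | zeroed ETH_HLEN | struct af_iucv_trans_hdr | message payload  |
 *   +-----------------+--------------------------+------------------+
 *
 * The transport header carries the magic ETH_P_AF_IUCV, version 1, the
 * flag byte (SYN/ACK/FIN/WIN/SHT), a window value for flow control, the
 * EBCDIC-converted source/destination IDs and a copy of the iucv message
 * header. A clone of the skb is kept on send_skb_q until the device
 * confirms transmission via afiucv_hs_callback_txnotify().
 */
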
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:	/* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id , 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	EBCASC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_name, sa->siucv_name, 8);
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

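/* Minimal user-space sketch of an explicit bind (illustrative only;
 * assumes a struct sockaddr_iucv declaration matching the kernel's and
 * that AF_IUCV is defined by the libc headers; the ids are hypothetical
 * and must be blank-padded to 8 bytes; error handling omitted):
 *
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	struct sockaddr_iucv sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.siucv_family = AF_IUCV;
 *	memcpy(sa.siucv_user_id, "LNXGST1 ", 8);
 *	memcpy(sa.siucv_name,    "APPSRV  ", 8);
 *	bind(fd, (struct sockaddr *) &sa, sizeof(sa));
 *
 * If siucv_user_id equals the local z/VM user id, the socket is bound to
 * the VM IUCV transport; otherwise the id is matched against the
 * perm_addr of HiperSockets devices (see the loop above).
 */
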
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char name[12];
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb = sock_alloc_send_skb(sk,
			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
			noblock, &err);
	else
		skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if the outstanding message limit for the iucv path
	 * has been reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto fail;
		}
		goto release;
	}
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	      && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			pr_iucv->path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
			       " exceeds message limit\n",
			       appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

release:
	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

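/* User-space sketch: selecting an iucv target class per message via
 * ancillary data (illustrative; SOL_IUCV and SCM_IUCV_TRGCLS as exported
 * by the kernel headers, class value hypothetical, error handling
 * omitted):
 *
 *	char buf[CMSG_SPACE(sizeof(__u32))];
 *	struct iovec iov = { .iov_base = data, .iov_len = datalen };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = buf,
 *			    .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *	__u32 trgcls = 0x00010000;
 *	c->cmsg_level = SOL_IUCV;
 *	c->cmsg_type  = SCM_IUCV_TRGCLS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(c), &trgcls, sizeof(trgcls));
 *	sendmsg(fd, &m, 0);
 *
 * Without this header, txmsg.class stays 0 (see the defaults above).
 */
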
/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);

		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				pr_iucv->path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	msg->msg_namelen = 0;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

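/* User-space sketch of the receive side: the target class of the original
 * iucv message arrives as a SCM_IUCV_TRGCLS control message (illustrative,
 * error handling omitted):
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	ssize_t n = recvmsg(fd, &m, 0);
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *	__u32 trgcls = 0;
 *	if (c && c->cmsg_level == SOL_IUCV &&
 *	    c->cmsg_type == SCM_IUCV_TRGCLS)
 *		memcpy(&trgcls, CMSG_DATA(c), sizeof(trgcls));
 *
 * For SOCK_SEQPACKET, MSG_EOR is always set and MSG_TRUNC indicates that
 * the record was larger than the supplied buffer (rlen is then returned).
 */
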
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= POLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

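/* User-space sketch of the socket options handled above (illustrative,
 * error handling omitted; note that SO_MSGLIMIT may only be changed while
 * the socket is still in IUCV_OPEN or IUCV_BOUND state):
 *
 *	int limit = 128;
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 *
 *	int msgsize;
 *	socklen_t len = sizeof(msgsize);
 *	getsockopt(fd, SOL_IUCV, SO_MSGSIZE, &msgsize, &len);
 *
 * SO_IPRMDATA_MSG (send small payloads in the iucv parameter list) takes
 * an int treated as a boolean flag.
 */
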
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

/***************** HiperSockets transport callbacks ********************/

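/* Connection setup over HiperSockets resembles a small TCP-like handshake
 * (summary of the callbacks below, not a formal protocol description):
 *
 *	client				server
 *	SYN (window = msglimit)  ->	listening socket found?
 *				 <-	SYN|ACK (window)   accepted
 *				 <-	SYN|FIN		   refused
 *
 * A FIN in either direction closes the connection; WIN frames return
 * flow-control credits; SHT announces a send-direction shutdown.
 */
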
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr =
				(struct af_iucv_trans_hdr *)skb->data;
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}

/**
 * afiucv_hs_callback_syn - react on received SYN
 */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err;

	iucv = iucv_sk(sk);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		if (nsk)
			sk_free(nsk);
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit, the connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk, 1);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	iucv->msglimit_peer = trans_hdr->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_fin() - react on received FIN
 */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv)
		goto out;
	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_win() - react on received WIN
 */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(trans_hdr->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_rx() - react on received data
 */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (sock_queue_rcv_skb(sk, skb)) {
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
		}
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *                   transport
 *                   called from netif RX softirq
 */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	char nullstring[8];
	int err = 0;

	skb_pull(skb, ETH_HLEN);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock
	how should we send with no sock
	1) send without sock no send rc checking?
	2) introduce default sock to handle these cases

	 SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	 data -> send FIN
	 SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			kfree_skb(skb);
			break;
		}
		/* fall through and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		/* fall through and receive zero length data */
	default:
		/* plain data frame */
		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
		       CB_TRGCLS_LEN);
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	}

	return err;
}

/**
 * afiucv_hs_callback_txnotify() - handle send notifications from
 *                                 HiperSockets transport
 */
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *nskb;
	unsigned long flags;

	read_lock_irqsave(&iucv_sk_list.lock, flags);
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk == isk) {
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock_irqrestore(&iucv_sk_list.lock, flags);

	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
		return;

	list = &iucv->send_skb_q;
	spin_lock_irqsave(&list->lock, flags);
	if (skb_queue_empty(list))
		goto out_unlock;
	list_skb = list->next;
	nskb = list_skb->next;
	while (list_skb != (struct sk_buff *)list) {
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				iucv_sock_wake_msglim(sk);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(list_skb, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				kfree_skb(list_skb);
				break;
			case TX_NOTIFY_UNREACHABLE:
			case TX_NOTIFY_DELAYED_UNREACHABLE:
			case TX_NOTIFY_TPQFULL: /* not yet used */
			case TX_NOTIFY_GENERALERROR:
			case TX_NOTIFY_DELAYED_GENERALERROR:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				if (sk->sk_state == IUCV_CONNECTED) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
				break;
			}
			break;
		}
		list_skb = nskb;
		nskb = nskb->next;
	}
out_unlock:
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

/**
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = (struct net_device *)ptr;
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};

static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;
	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	}
	register_netdevice_notifier(&afiucv_netdev_notifier);
	dev_add_pack(&iucv_packet_type);
	return 0;

out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}

static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		device_unregister(af_iucv_dev);
		driver_unregister(&af_iucv_driver);
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
		symbol_put(iucv_if);
	}
	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);