/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>
#define VERSION "1.1"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
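/*
 * Per the PRMDATA convention used by iucv_msg_length() below, byte 7 of
 * iprm_shutdown encodes a data length of 0xff - 0x01 = 0xfe (254).
 * Because 254 > 7, receivers treat this parameter-list message as a
 * special shutdown notification rather than as socket data.
 */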
#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
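/*
 * Resulting layout of the first 8 bytes of skb->cb (tag and class are
 * both u32 fields of struct iucv_message):
 *
 *	bytes 0..3:  iucv message tag    (CB_TAG,    CB_TAG_LEN)
 *	bytes 4..7:  iucv target class   (CB_TRGCLS, CB_TRGCLS_LEN)
 *
 * For example, iucv_sock_sendmsg() saves the send tag with
 * memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN), and iucv_process_message()
 * saves the target class with memcpy(CB_TRGCLS(skb), &msg->class,
 * CB_TRGCLS_LEN).
 */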
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})
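/*
 * Usage sketch: iucv_sock_wait() evaluates to 0 if the condition already
 * holds or becomes true before the timeout expires, e.g.:
 *
 *	timeo = sock_sndtimeo(sk, noblock);
 *	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
 *
 * The socket lock must be held on entry; __iucv_sock_wait() drops and
 * re-acquires it around schedule_timeout().
 */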
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};
static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}
/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_SEVERED:
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			if (iucv->path) {
				err = iucv_path_sever(iucv->path, NULL);
				iucv_path_free(iucv->path);
				iucv->path = NULL;
			}
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}
/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};
static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = &iucv_bus,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xff.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
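/*
 * Worked example: for an IPRM message carrying 5 bytes of socket data,
 * the sender stores PRMDATA[7] = 0xff - 5 = 0xfa (see iucv_send_iprm()),
 * and this function recovers datalen = 0xff - 0xfa = 5.  For the special
 * iprm_shutdown message, PRMDATA[7] = 0x01 yields datalen = 0xfe; the
 * function then returns 8 and iucv_process_message() checks the eight
 * PRMDATA bytes for the shutdown notification.
 */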
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
}
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:   /* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		break;

	default:
		/* nothing to do here */
		break;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;
	iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
	iucv_sk(sk)->path = NULL;
	memset(&iucv_sk(sk)->src_user_id, 0, 32);

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}
void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN)
		err = -ECONNREFUSED;

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if outstanding messages for iucv path have reached limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	    && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			iucv_path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else {
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err("Application %s on z/VM guest %s"
				       " exceeds message limit\n",
				       appl_id, user_id);
				err = -EAGAIN;
			} else
				err = -EPIPE;
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
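/*
 * For illustration only, a minimal user-space sender that sets the iucv
 * message target class through an SCM_IUCV_TRGCLS ancillary message could
 * look like this (sketch; fd, iov and mh are hypothetical, error handling
 * omitted):
 *
 *	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
 *	uint32_t trgcls = 1;
 *
 *	c->cmsg_level = SOL_IUCV;
 *	c->cmsg_type  = SCM_IUCV_TRGCLS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(c), &trgcls, sizeof(trgcls));
 *	sendmsg(fd, &mh, 0);
 */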
/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
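/*
 * Sizing example: with sk->sk_rcvbuf = 64 KB, a 100 KB iucv message is
 * split into 16 KB fragments (sk_rcvbuf / 4), i.e. six full fragments
 * plus a final 4 KB fragment, each carrying a copy of the original
 * target class in its control buffer.
 */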
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}
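/*
 * For illustration only, a user-space receiver can read back the target
 * class from the SCM_IUCV_TRGCLS ancillary data created above (sketch;
 * mh is the hypothetical struct msghdr passed to recvmsg()):
 *
 *	struct cmsghdr *c;
 *	uint32_t trgcls;
 *
 *	for (c = CMSG_FIRSTHDR(&mh); c; c = CMSG_NXTHDR(&mh, c))
 *		if (c->cmsg_level == SOL_IUCV &&
 *		    c->cmsg_type == SCM_IUCV_TRGCLS)
 *			memcpy(&trgcls, CMSG_DATA(c), sizeof(trgcls));
 */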
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
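/*
 * For illustration only, user space can raise the message limit while the
 * socket is still unbound or bound (states IUCV_OPEN/IUCV_BOUND, see
 * above) and read back the effective value once connected (sketch; fd is
 * a hypothetical AF_IUCV socket):
 *
 *	int lim = 10;
 *	socklen_t optlen = sizeof(lim);
 *
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &lim, sizeof(lim));
 *	getsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &lim, &optlen);
 */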
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}
	BUG_ON(!this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};
static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	/* establish dummy device */
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_sock;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = &iucv_bus;
	af_iucv_dev->parent = iucv_root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;

	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static void __exit afiucv_exit(void)
{
	device_unregister(af_iucv_dev);
	driver_unregister(&af_iucv_driver);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);