4 * Copyright IBM Corp. 2001, 2009
7 * Original netiucv driver:
8 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
9 * Sysfs integration and all bugs therein:
10 * Cornelia Huck (cornelia.huck@de.ibm.com)
12 * Ursula Braun (ursula.braun@de.ibm.com)
15 * the source of the original IUCV driver by:
16 * Stefan Hegewald <hegewald@de.ibm.com>
17 * Hartmut Penner <hpenner@de.ibm.com>
18 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
19 * Martin Schwidefsky (schwidefsky@de.ibm.com)
20 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2, or (at your option)
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
38 #define KMSG_COMPONENT "netiucv"
39 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/kernel.h>
46 #include <linux/slab.h>
47 #include <linux/errno.h>
48 #include <linux/types.h>
49 #include <linux/interrupt.h>
50 #include <linux/timer.h>
51 #include <linux/bitops.h>
53 #include <linux/signal.h>
54 #include <linux/string.h>
55 #include <linux/device.h>
58 #include <linux/if_arp.h>
59 #include <linux/tcp.h>
60 #include <linux/skbuff.h>
61 #include <linux/ctype.h>
65 #include <linux/uaccess.h>
66 #include <asm/ebcdic.h>
68 #include <net/iucv/iucv.h>
72 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
73 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
76 * Debug Facility stuff
78 #define IUCV_DBF_SETUP_NAME "iucv_setup"
79 #define IUCV_DBF_SETUP_LEN 64
80 #define IUCV_DBF_SETUP_PAGES 2
81 #define IUCV_DBF_SETUP_NR_AREAS 1
82 #define IUCV_DBF_SETUP_LEVEL 3
84 #define IUCV_DBF_DATA_NAME "iucv_data"
85 #define IUCV_DBF_DATA_LEN 128
86 #define IUCV_DBF_DATA_PAGES 2
87 #define IUCV_DBF_DATA_NR_AREAS 1
88 #define IUCV_DBF_DATA_LEVEL 2
90 #define IUCV_DBF_TRACE_NAME "iucv_trace"
91 #define IUCV_DBF_TRACE_LEN 16
92 #define IUCV_DBF_TRACE_PAGES 4
93 #define IUCV_DBF_TRACE_NR_AREAS 1
94 #define IUCV_DBF_TRACE_LEVEL 3
96 #define IUCV_DBF_TEXT(name,level,text) \
98 debug_text_event(iucv_dbf_##name,level,text); \
101 #define IUCV_DBF_HEX(name,level,addr,len) \
103 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
106 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
108 #define IUCV_DBF_TEXT_(name, level, text...) \
110 if (debug_level_enabled(iucv_dbf_##name, level)) { \
111 char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
112 sprintf(__buf, text); \
113 debug_text_event(iucv_dbf_##name, level, __buf); \
114 put_cpu_var(iucv_dbf_txt_buf); \
118 #define IUCV_DBF_SPRINTF(name,level,text...) \
120 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
121 debug_sprintf_event(iucv_dbf_trace, level, text ); \
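/*
 * Example usage of these wrappers, as seen throughout this file:
 *   IUCV_DBF_TEXT(trace, 3, __func__);
 *   IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 * The plain variant logs a fixed string; the underscore variant formats
 * into the per-CPU iucv_dbf_txt_buf first, and only if the requested
 * level is enabled for that debug area (see debug_set_level() in
 * iucv_register_dbf_views() below).
 */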
125 * some more debug stuff
127 #define PRINTK_HEADER " iucv: " /* for debugging */
129 /* dummy device to make sure netiucv_pm functions are called */
130 static struct device *netiucv_dev;
132 static int netiucv_pm_prepare(struct device *);
133 static void netiucv_pm_complete(struct device *);
134 static int netiucv_pm_freeze(struct device *);
135 static int netiucv_pm_restore_thaw(struct device *);
137 static const struct dev_pm_ops netiucv_pm_ops = {
138 .prepare = netiucv_pm_prepare,
139 .complete = netiucv_pm_complete,
140 .freeze = netiucv_pm_freeze,
141 .thaw = netiucv_pm_restore_thaw,
142 .restore = netiucv_pm_restore_thaw,
145 static struct device_driver netiucv_driver = {
146 .owner = THIS_MODULE,
149 .pm = &netiucv_pm_ops,
152 static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
153 static void netiucv_callback_connack(struct iucv_path *, u8 *);
154 static void netiucv_callback_connrej(struct iucv_path *, u8 *);
155 static void netiucv_callback_connsusp(struct iucv_path *, u8 *);
156 static void netiucv_callback_connres(struct iucv_path *, u8 *);
157 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
158 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
160 static struct iucv_handler netiucv_handler = {
161 .path_pending = netiucv_callback_connreq,
162 .path_complete = netiucv_callback_connack,
163 .path_severed = netiucv_callback_connrej,
164 .path_quiesced = netiucv_callback_connsusp,
165 .path_resumed = netiucv_callback_connres,
166 .message_pending = netiucv_callback_rx,
167 .message_complete = netiucv_callback_txdone
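/*
 * The iucv base layer invokes the handlers registered above whenever
 * something happens on one of our paths (typically from its tasklet
 * context); the wrapper functions below do little more than translate
 * each callback into a CONN_EVENT_* for the per-connection state machine.
 */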
171 * Per connection profiling data
173 struct connection_profile {
174 unsigned long maxmulti;
175 unsigned long maxcqueue;
176 unsigned long doios_single;
177 unsigned long doios_multi;
179 unsigned long tx_time;
180 unsigned long send_stamp;
181 unsigned long tx_pending;
182 unsigned long tx_max_pending;
186 * Representation of one iucv connection
188 struct iucv_connection {
189 struct list_head list;
190 struct iucv_path *path;
191 struct sk_buff *rx_buff;
192 struct sk_buff *tx_buff;
193 struct sk_buff_head collect_queue;
194 struct sk_buff_head commit_queue;
195 spinlock_t collect_lock;
200 struct net_device *netdev;
201 struct connection_profile prof;
207 * Linked list of all connection structs.
209 static LIST_HEAD(iucv_connection_list);
210 static DEFINE_RWLOCK(iucv_connection_rwlock);
213 * Representation of event-data for the
214 * connection state machine.
217 struct iucv_connection *conn;
222 * Private part of the network device structure
224 struct netiucv_priv {
225 struct net_device_stats stats;
228 struct iucv_connection *conn;
234 * Link level header for a packet.
240 #define NETIUCV_HDRLEN (sizeof(struct ll_header))
241 #define NETIUCV_BUFSIZE_MAX 65537
242 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
243 #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
244 #define NETIUCV_MTU_DEFAULT 9216
245 #define NETIUCV_QUEUELEN_DEFAULT 50
246 #define NETIUCV_TIMEOUT_5SEC 5000
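/*
 * A note on the sizes above: struct ll_header appears to consist of a
 * single u16 'next' field (see the casts in netiucv_unpack_skb()), so
 * NETIUCV_HDRLEN should be 2 bytes.  NETIUCV_BUFSIZE_MAX bounds one IUCV
 * message buffer and NETIUCV_MTU_MAX is that bound minus one link-level
 * header; with the default 9216-byte MTU a packet occupies
 * 9216 + NETIUCV_HDRLEN bytes in the buffer, which leaves room for
 * several packets per multi-send (see conn_action_txdone()).
 */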
249 * Compatibility macros for busy handling
250 * of network devices.
252 static inline void netiucv_clear_busy(struct net_device *dev)
254 struct netiucv_priv *priv = netdev_priv(dev);
255 clear_bit(0, &priv->tbusy);
256 netif_wake_queue(dev);
259 static inline int netiucv_test_and_set_busy(struct net_device *dev)
261 struct netiucv_priv *priv = netdev_priv(dev);
262 netif_stop_queue(dev);
263 return test_and_set_bit(0, &priv->tbusy);
266 static u8 iucvMagic_ascii[16] = {
267 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
268 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
271 static u8 iucvMagic_ebcdic[16] = {
272 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
273 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
277 * Convert an iucv userId to its printable
278 * form (strip whitespace at end).
280 * @param name An iucv userId
282 * @returns The printable string (static data!!)
284 static char *netiucv_printname(char *name, int len)
288 memcpy(tmp, name, len);
290 while (*p && ((p - tmp) < len) && (!isspace(*p)))
296 static char *netiucv_printuser(struct iucv_connection *conn)
298 static char tmp_uid[9];
299 static char tmp_udat[17];
300 static char buf[100];
302 if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
305 memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
306 memcpy(tmp_udat, conn->userdata, 16);
307 EBCASC(tmp_udat, 16);
308 memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
309 sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
312 return netiucv_printname(conn->userid, 8);
316 * States of the interface statemachine.
324 * MUST always be the last element!!
329 static const char *dev_state_names[] = {
337 * Events of the interface statemachine.
345 * MUST always be the last element!!
350 static const char *dev_event_names[] = {
358 * Events of the connection statemachine.
362 * Events representing callbacks from the
363 * lowlevel iucv layer.
374 * Events representing error return codes from
375 * calls to the lowlevel iucv layer.
379 * Event representing timer expiry.
384 * Events representing commands from upper levels.
390 * MUST always be the last element!!
395 static const char *conn_event_names[] = {
396 "Remote connection request",
397 "Remote connection acknowledge",
398 "Remote connection reject",
399 "Connection suspended",
400 "Connection resumed",
411 * States of the connection statemachine.
415 * Connection not assigned to any device,
416 * initial state, invalid
421 * Userid assigned but not operating
426 * Connection registered,
427 * no connection request sent yet,
428 * no connection request received
430 CONN_STATE_STARTWAIT,
433 * Connection registered and connection request sent,
434 * no acknowledge and no connection request received yet.
436 CONN_STATE_SETUPWAIT,
439 * Connection up and running, idle.
444 * Data sent, awaiting CONN_EVENT_TXDONE
449 * Error during registration.
454 * Error during connection setup.
459 * MUST always be the last element!!
464 static const char *conn_state_names[] = {
472 "Registration error",
478 * Debug Facility Stuff
480 static debug_info_t *iucv_dbf_setup = NULL;
481 static debug_info_t *iucv_dbf_data = NULL;
482 static debug_info_t *iucv_dbf_trace = NULL;
484 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
486 static void iucv_unregister_dbf_views(void)
488 debug_unregister(iucv_dbf_setup);
489 debug_unregister(iucv_dbf_data);
490 debug_unregister(iucv_dbf_trace);
492 static int iucv_register_dbf_views(void)
494 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
495 IUCV_DBF_SETUP_PAGES,
496 IUCV_DBF_SETUP_NR_AREAS,
498 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
500 IUCV_DBF_DATA_NR_AREAS,
502 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
503 IUCV_DBF_TRACE_PAGES,
504 IUCV_DBF_TRACE_NR_AREAS,
507 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
508 (iucv_dbf_trace == NULL)) {
509 iucv_unregister_dbf_views();
512 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
513 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
515 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
516 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
518 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
519 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
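/*
 * Once registered, the three debug areas above can be inspected at
 * runtime through the s390 debug feature, typically under
 * /sys/kernel/debug/s390dbf/iucv_setup, iucv_data and iucv_trace,
 * using the hex_ascii view registered here.
 */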
525 * Callback-wrappers, called from lowlevel iucv layer.
528 static void netiucv_callback_rx(struct iucv_path *path,
529 struct iucv_message *msg)
531 struct iucv_connection *conn = path->private;
532 struct iucv_event ev;
536 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
539 static void netiucv_callback_txdone(struct iucv_path *path,
540 struct iucv_message *msg)
542 struct iucv_connection *conn = path->private;
543 struct iucv_event ev;
547 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
550 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
552 struct iucv_connection *conn = path->private;
554 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
557 static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
560 struct iucv_connection *conn = path->private;
561 struct iucv_event ev;
562 static char tmp_user[9];
563 static char tmp_udat[17];
567 memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
568 memcpy(tmp_udat, ipuser, 16);
569 EBCASC(tmp_udat, 16);
570 read_lock_bh(&iucv_connection_rwlock);
571 list_for_each_entry(conn, &iucv_connection_list, list) {
572 if (strncmp(ipvmid, conn->userid, 8) ||
573 strncmp(ipuser, conn->userdata, 16))
575 /* Found a matching connection for this path. */
579 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
582 IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
583 tmp_user, netiucv_printname(tmp_udat, 16));
584 read_unlock_bh(&iucv_connection_rwlock);
588 static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
590 struct iucv_connection *conn = path->private;
592 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
595 static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
597 struct iucv_connection *conn = path->private;
599 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
602 static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
604 struct iucv_connection *conn = path->private;
606 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
610 * NOP action for statemachines
612 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
617 * Actions of the connection statemachine
622 * @conn: The connection where this skb has been received.
623 * @pskb: The received skb.
625 * Unpack a just received skb and hand it over to upper layers.
626 * Helper function for conn_action_rx.
628 static void netiucv_unpack_skb(struct iucv_connection *conn,
629 struct sk_buff *pskb)
631 struct net_device *dev = conn->netdev;
632 struct netiucv_priv *privptr = netdev_priv(dev);
635 skb_put(pskb, NETIUCV_HDRLEN);
637 pskb->ip_summed = CHECKSUM_NONE;
638 pskb->protocol = cpu_to_be16(ETH_P_IP);
642 struct ll_header *header = (struct ll_header *) pskb->data;
647 skb_pull(pskb, NETIUCV_HDRLEN);
648 header->next -= offset;
649 offset += header->next;
650 header->next -= NETIUCV_HDRLEN;
651 if (skb_tailroom(pskb) < header->next) {
652 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
653 header->next, skb_tailroom(pskb));
656 skb_put(pskb, header->next);
657 skb_reset_mac_header(pskb);
658 skb = dev_alloc_skb(pskb->len);
660 IUCV_DBF_TEXT(data, 2,
661 "Out of memory in netiucv_unpack_skb\n");
662 privptr->stats.rx_dropped++;
665 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
667 skb_reset_mac_header(skb);
668 skb->dev = pskb->dev;
669 skb->protocol = pskb->protocol;
670 pskb->ip_summed = CHECKSUM_UNNECESSARY;
671 privptr->stats.rx_packets++;
672 privptr->stats.rx_bytes += skb->len;
674 * Since receiving is always initiated from a tasklet (in iucv.c),
675 * we must use netif_rx_ni() instead of netif_rx()
678 skb_pull(pskb, header->next);
679 skb_put(pskb, NETIUCV_HDRLEN);
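/*
 * Wire format, as implied by the pack code (netiucv_transmit_skb() and
 * conn_action_txdone()) and the unpack code above: each packet inside an
 * IUCV message is preceded by an ll_header whose 'next' field holds the
 * offset of the following header, and a header with next == 0 appears to
 * terminate the message.  That is why netiucv_unpack_skb() keeps
 * adjusting header->next by the running offset and by NETIUCV_HDRLEN
 * before handing the payload to the stack.
 */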
683 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
685 struct iucv_event *ev = arg;
686 struct iucv_connection *conn = ev->conn;
687 struct iucv_message *msg = ev->data;
688 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
691 IUCV_DBF_TEXT(trace, 4, __func__);
694 iucv_message_reject(conn->path, msg);
695 IUCV_DBF_TEXT(data, 2,
696 "Received data for unlinked connection\n");
699 if (msg->length > conn->max_buffsize) {
700 iucv_message_reject(conn->path, msg);
701 privptr->stats.rx_dropped++;
702 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
703 msg->length, conn->max_buffsize);
706 conn->rx_buff->data = conn->rx_buff->head;
707 skb_reset_tail_pointer(conn->rx_buff);
708 conn->rx_buff->len = 0;
709 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
711 if (rc || msg->length < 5) {
712 privptr->stats.rx_errors++;
713 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
716 netiucv_unpack_skb(conn, conn->rx_buff);
719 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
721 struct iucv_event *ev = arg;
722 struct iucv_connection *conn = ev->conn;
723 struct iucv_message *msg = ev->data;
724 struct iucv_message txmsg;
725 struct netiucv_priv *privptr = NULL;
726 u32 single_flag = msg->tag;
731 unsigned long saveflags;
732 struct ll_header header;
735 IUCV_DBF_TEXT(trace, 4, __func__);
737 if (!conn || !conn->netdev) {
738 IUCV_DBF_TEXT(data, 2,
739 "Send confirmation for unlinked connection\n");
742 privptr = netdev_priv(conn->netdev);
743 conn->prof.tx_pending--;
745 if ((skb = skb_dequeue(&conn->commit_queue))) {
746 atomic_dec(&skb->users);
748 privptr->stats.tx_packets++;
749 privptr->stats.tx_bytes +=
750 (skb->len - NETIUCV_HDRLEN
753 dev_kfree_skb_any(skb);
756 conn->tx_buff->data = conn->tx_buff->head;
757 skb_reset_tail_pointer(conn->tx_buff);
758 conn->tx_buff->len = 0;
759 spin_lock_irqsave(&conn->collect_lock, saveflags);
760 while ((skb = skb_dequeue(&conn->collect_queue))) {
761 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
762 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
764 skb_copy_from_linear_data(skb,
765 skb_put(conn->tx_buff, skb->len),
770 atomic_dec(&skb->users);
771 dev_kfree_skb_any(skb);
773 if (conn->collect_len > conn->prof.maxmulti)
774 conn->prof.maxmulti = conn->collect_len;
775 conn->collect_len = 0;
776 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
777 if (conn->tx_buff->len == 0) {
778 fsm_newstate(fi, CONN_STATE_IDLE);
783 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
784 conn->prof.send_stamp = jiffies;
787 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
788 conn->tx_buff->data, conn->tx_buff->len);
789 conn->prof.doios_multi++;
790 conn->prof.txlen += conn->tx_buff->len;
791 conn->prof.tx_pending++;
792 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
793 conn->prof.tx_max_pending = conn->prof.tx_pending;
795 conn->prof.tx_pending--;
796 fsm_newstate(fi, CONN_STATE_IDLE);
798 privptr->stats.tx_errors += txpackets;
799 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
802 privptr->stats.tx_packets += txpackets;
803 privptr->stats.tx_bytes += txbytes;
805 if (stat_maxcq > conn->prof.maxcqueue)
806 conn->prof.maxcqueue = stat_maxcq;
810 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
812 struct iucv_event *ev = arg;
813 struct iucv_connection *conn = ev->conn;
814 struct iucv_path *path = ev->data;
815 struct net_device *netdev = conn->netdev;
816 struct netiucv_priv *privptr = netdev_priv(netdev);
819 IUCV_DBF_TEXT(trace, 3, __func__);
822 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
824 rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
826 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
829 fsm_newstate(fi, CONN_STATE_IDLE);
830 netdev->tx_queue_len = conn->path->msglim;
831 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
834 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
836 struct iucv_event *ev = arg;
837 struct iucv_path *path = ev->data;
839 IUCV_DBF_TEXT(trace, 3, __func__);
840 iucv_path_sever(path, NULL);
843 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
845 struct iucv_connection *conn = arg;
846 struct net_device *netdev = conn->netdev;
847 struct netiucv_priv *privptr = netdev_priv(netdev);
849 IUCV_DBF_TEXT(trace, 3, __func__);
850 fsm_deltimer(&conn->timer);
851 fsm_newstate(fi, CONN_STATE_IDLE);
852 netdev->tx_queue_len = conn->path->msglim;
853 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
856 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
858 struct iucv_connection *conn = arg;
860 IUCV_DBF_TEXT(trace, 3, __func__);
861 fsm_deltimer(&conn->timer);
862 iucv_path_sever(conn->path, conn->userdata);
863 fsm_newstate(fi, CONN_STATE_STARTWAIT);
866 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
868 struct iucv_connection *conn = arg;
869 struct net_device *netdev = conn->netdev;
870 struct netiucv_priv *privptr = netdev_priv(netdev);
872 IUCV_DBF_TEXT(trace, 3, __func__);
874 fsm_deltimer(&conn->timer);
875 iucv_path_sever(conn->path, conn->userdata);
876 dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
877 "connection\n", netiucv_printuser(conn));
878 IUCV_DBF_TEXT(data, 2,
879 "conn_action_connsever: Remote dropped connection\n");
880 fsm_newstate(fi, CONN_STATE_STARTWAIT);
881 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
884 static void conn_action_start(fsm_instance *fi, int event, void *arg)
886 struct iucv_connection *conn = arg;
887 struct net_device *netdev = conn->netdev;
888 struct netiucv_priv *privptr = netdev_priv(netdev);
891 IUCV_DBF_TEXT(trace, 3, __func__);
893 fsm_newstate(fi, CONN_STATE_STARTWAIT);
896 * We must set the state before calling iucv_connect because the
897 * callback handler could be called at any point after the connection
901 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
902 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
903 IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
904 netdev->name, netiucv_printuser(conn));
906 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
907 NULL, conn->userdata, conn);
910 netdev->tx_queue_len = conn->path->msglim;
911 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
912 CONN_EVENT_TIMER, conn);
915 dev_warn(privptr->dev,
916 "The IUCV device failed to connect to z/VM guest %s\n",
917 netiucv_printname(conn->userid, 8));
918 fsm_newstate(fi, CONN_STATE_STARTWAIT);
921 dev_warn(privptr->dev,
922 "The IUCV device failed to connect to the peer on z/VM"
923 " guest %s\n", netiucv_printname(conn->userid, 8));
924 fsm_newstate(fi, CONN_STATE_STARTWAIT);
927 dev_err(privptr->dev,
928 "Connecting the IUCV device would exceed the maximum"
929 " number of IUCV connections\n");
930 fsm_newstate(fi, CONN_STATE_CONNERR);
933 dev_err(privptr->dev,
934 "z/VM guest %s has too many IUCV connections"
935 " to connect with the IUCV device\n",
936 netiucv_printname(conn->userid, 8));
937 fsm_newstate(fi, CONN_STATE_CONNERR);
940 dev_err(privptr->dev,
941 "The IUCV device cannot connect to a z/VM guest with no"
942 " IUCV authorization\n");
943 fsm_newstate(fi, CONN_STATE_CONNERR);
946 dev_err(privptr->dev,
947 "Connecting the IUCV device failed with error %d\n",
949 fsm_newstate(fi, CONN_STATE_CONNERR);
952 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
957 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
961 while ((skb = skb_dequeue(q))) {
962 atomic_dec(&skb->users);
963 dev_kfree_skb_any(skb);
967 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
969 struct iucv_event *ev = arg;
970 struct iucv_connection *conn = ev->conn;
971 struct net_device *netdev = conn->netdev;
972 struct netiucv_priv *privptr = netdev_priv(netdev);
974 IUCV_DBF_TEXT(trace, 3, __func__);
976 fsm_deltimer(&conn->timer);
977 fsm_newstate(fi, CONN_STATE_STOPPED);
978 netiucv_purge_skb_queue(&conn->collect_queue);
980 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
981 iucv_path_sever(conn->path, conn->userdata);
985 netiucv_purge_skb_queue(&conn->commit_queue);
986 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
989 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
991 struct iucv_connection *conn = arg;
992 struct net_device *netdev = conn->netdev;
994 IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
995 netdev->name, conn->userid);
998 static const fsm_node conn_fsm[] = {
999 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
1000 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
1002 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
1003 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
1004 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
1005 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
1006 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
1007 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
1008 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
1010 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
1011 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1012 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1013 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
1014 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
1016 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
1017 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
1019 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
1020 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
1021 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
1023 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
1024 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
1026 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
1027 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
1030 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
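/*
 * The table above is handed to init_fsm() in netiucv_new_connection();
 * fsm_event(fsm, event, arg) looks up the (current state, event) pair and
 * runs the registered action.  For example, CONN_EVENT_CONN_REQ arriving
 * in CONN_STATE_STARTWAIT ends up in conn_action_connaccept(), while the
 * same event in CONN_STATE_IDLE is rejected.  The dev_fsm table further
 * down works the same way for the interface state machine.
 */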
1034 * Actions for the interface statemachine.
1039 * @fi: An instance of an interface statemachine.
1040 * @event: The event that just happened.
1041 * @arg: Generic pointer, cast from struct net_device * upon call.
1043 * Start up the connection by sending CONN_EVENT_START to it.
1045 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1047 struct net_device *dev = arg;
1048 struct netiucv_priv *privptr = netdev_priv(dev);
1050 IUCV_DBF_TEXT(trace, 3, __func__);
1052 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1053 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1057 * Shut down the connection by sending CONN_EVENT_STOP to it.
1059 * @param fi An instance of an interface statemachine.
1060 * @param event The event that just happened.
1061 * @param arg Generic pointer, cast from struct net_device * upon call.
1064 dev_action_stop(fsm_instance *fi, int event, void *arg)
1066 struct net_device *dev = arg;
1067 struct netiucv_priv *privptr = netdev_priv(dev);
1068 struct iucv_event ev;
1070 IUCV_DBF_TEXT(trace, 3, __func__);
1072 ev.conn = privptr->conn;
1074 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1075 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1079 * Called from the connection statemachine
1080 * when a connection is up and running.
1082 * @param fi An instance of an interface statemachine.
1083 * @param event The event that just happened.
1084 * @param arg Generic pointer, cast from struct net_device * upon call.
1087 dev_action_connup(fsm_instance *fi, int event, void *arg)
1089 struct net_device *dev = arg;
1090 struct netiucv_priv *privptr = netdev_priv(dev);
1092 IUCV_DBF_TEXT(trace, 3, __func__);
1094 switch (fsm_getstate(fi)) {
1095 case DEV_STATE_STARTWAIT:
1096 fsm_newstate(fi, DEV_STATE_RUNNING);
1097 dev_info(privptr->dev,
1098 "The IUCV device has been connected"
1099 " successfully to %s\n",
1100 netiucv_printuser(privptr->conn));
1101 IUCV_DBF_TEXT(setup, 3,
1102 "connection is up and running\n");
1104 case DEV_STATE_STOPWAIT:
1105 IUCV_DBF_TEXT(data, 2,
1106 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1112 * Called from the connection statemachine
1113 * when a connection has been shut down.
1115 * @param fi An instance of an interface statemachine.
1116 * @param event The event that just happened.
1117 * @param arg Generic pointer, cast from struct net_device * upon call.
1120 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1122 IUCV_DBF_TEXT(trace, 3, __func__);
1124 switch (fsm_getstate(fi)) {
1125 case DEV_STATE_RUNNING:
1126 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1128 case DEV_STATE_STOPWAIT:
1129 fsm_newstate(fi, DEV_STATE_STOPPED);
1130 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1135 static const fsm_node dev_fsm[] = {
1136 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1138 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1139 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1141 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1142 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1144 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1145 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1146 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
1149 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1152 * Transmit a packet.
1153 * This is a helper function for netiucv_tx().
1155 * @param conn Connection to be used for sending.
1156 * @param skb Pointer to struct sk_buff of packet to send.
1157 * The linklevel header has already been set up
1160 * @return 0 on success, -ERRNO on failure.
1162 static int netiucv_transmit_skb(struct iucv_connection *conn,
1163 struct sk_buff *skb)
1165 struct iucv_message msg;
1166 unsigned long saveflags;
1167 struct ll_header header;
1170 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1171 int l = skb->len + NETIUCV_HDRLEN;
1173 spin_lock_irqsave(&conn->collect_lock, saveflags);
1174 if (conn->collect_len + l >
1175 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1177 IUCV_DBF_TEXT(data, 2,
1178 "EBUSY from netiucv_transmit_skb\n");
1180 atomic_inc(&skb->users);
1181 skb_queue_tail(&conn->collect_queue, skb);
1182 conn->collect_len += l;
1185 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1187 struct sk_buff *nskb = skb;
1189 * Copy the skb to a new allocated skb in lowmem only if the
1190 * data is located above 2G in memory or tailroom is < 2.
1192 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1193 NETIUCV_HDRLEN)) >> 31;
1195 if (hi || (skb_tailroom(skb) < 2)) {
1196 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1197 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1199 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1203 skb_reserve(nskb, NETIUCV_HDRLEN);
1204 memcpy(skb_put(nskb, skb->len),
1205 skb->data, skb->len);
1210 * skb now is below 2G and has enough room. Add headers.
1212 header.next = nskb->len + NETIUCV_HDRLEN;
1213 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1215 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1217 fsm_newstate(conn->fsm, CONN_STATE_TX);
1218 conn->prof.send_stamp = jiffies;
1222 rc = iucv_message_send(conn->path, &msg, 0, 0,
1223 nskb->data, nskb->len);
1224 conn->prof.doios_single++;
1225 conn->prof.txlen += skb->len;
1226 conn->prof.tx_pending++;
1227 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1228 conn->prof.tx_max_pending = conn->prof.tx_pending;
1230 struct netiucv_priv *privptr;
1231 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1232 conn->prof.tx_pending--;
1233 privptr = netdev_priv(conn->netdev);
1235 privptr->stats.tx_errors++;
1237 dev_kfree_skb(nskb);
1240 * Remove our headers. They get added
1241 * again on retransmit.
1243 skb_pull(skb, NETIUCV_HDRLEN);
1244 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1246 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1250 atomic_inc(&nskb->users);
1251 skb_queue_tail(&conn->commit_queue, nskb);
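/*
 * Summary of the two transmit paths sketched above: if the connection is
 * idle, the skb is sent right away as a single IUCV message and queued on
 * commit_queue until the send completes (conn_action_txdone() reads the
 * message tag back as single_flag for accounting); if a send is already
 * in flight, the skb is parked on collect_queue and later flushed as one
 * multi-packet message from conn_action_txdone().
 */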
1259 * Interface API for upper network layers
1263 * Open an interface.
1264 * Called from generic network layer when ifconfig up is run.
1266 * @param dev Pointer to interface struct.
1268 * @return 0 on success, -ERRNO on failure. (Never fails.)
1270 static int netiucv_open(struct net_device *dev)
1272 struct netiucv_priv *priv = netdev_priv(dev);
1274 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1279 * Close an interface.
1280 * Called from generic network layer when ifconfig down is run.
1282 * @param dev Pointer to interface struct.
1284 * @return 0 on success, -ERRNO on failure. (Never fails.)
1286 static int netiucv_close(struct net_device *dev)
1288 struct netiucv_priv *priv = netdev_priv(dev);
1290 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1294 static int netiucv_pm_prepare(struct device *dev)
1296 IUCV_DBF_TEXT(trace, 3, __func__);
1300 static void netiucv_pm_complete(struct device *dev)
1302 IUCV_DBF_TEXT(trace, 3, __func__);
1307 * netiucv_pm_freeze() - Freeze PM callback
1308 * @dev: netiucv device
1310 * close open netiucv interfaces
1312 static int netiucv_pm_freeze(struct device *dev)
1314 struct netiucv_priv *priv = dev_get_drvdata(dev);
1315 struct net_device *ndev = NULL;
1318 IUCV_DBF_TEXT(trace, 3, __func__);
1319 if (priv && priv->conn)
1320 ndev = priv->conn->netdev;
1323 netif_device_detach(ndev);
1324 priv->pm_state = fsm_getstate(priv->fsm);
1325 rc = netiucv_close(ndev);
1331 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1332 * @dev: netiucv device
1334 * re-open netiucv interfaces closed during freeze
1336 static int netiucv_pm_restore_thaw(struct device *dev)
1338 struct netiucv_priv *priv = dev_get_drvdata(dev);
1339 struct net_device *ndev = NULL;
1342 IUCV_DBF_TEXT(trace, 3, __func__);
1343 if (priv && priv->conn)
1344 ndev = priv->conn->netdev;
1347 switch (priv->pm_state) {
1348 case DEV_STATE_RUNNING:
1349 case DEV_STATE_STARTWAIT:
1350 rc = netiucv_open(ndev);
1355 netif_device_attach(ndev);
1361 * Start transmission of a packet.
1362 * Called from generic network device layer.
1364 * @param skb Pointer to buffer containing the packet.
1365 * @param dev Pointer to interface struct.
1367 * @return 0 if packet consumed, !0 if packet rejected.
1368 * Note: If we return !0, then the packet is freed by
1369 * the generic network layer.
1371 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1373 struct netiucv_priv *privptr = netdev_priv(dev);
1376 IUCV_DBF_TEXT(trace, 4, __func__);
1378 * Some sanity checks ...
1381 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1382 privptr->stats.tx_dropped++;
1383 return NETDEV_TX_OK;
1385 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1386 IUCV_DBF_TEXT(data, 2,
1387 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1389 privptr->stats.tx_dropped++;
1390 return NETDEV_TX_OK;
1394 * If connection is not running, try to restart it
1395 * and throw away packet.
1397 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1399 privptr->stats.tx_dropped++;
1400 privptr->stats.tx_errors++;
1401 privptr->stats.tx_carrier_errors++;
1402 return NETDEV_TX_OK;
1405 if (netiucv_test_and_set_busy(dev)) {
1406 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1407 return NETDEV_TX_BUSY;
1409 netif_trans_update(dev);
1410 rc = netiucv_transmit_skb(privptr->conn, skb);
1411 netiucv_clear_busy(dev);
1412 return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1417 * @dev: Pointer to interface struct.
1419 * Returns interface statistics of a device.
1421 * Returns pointer to stats struct of this interface.
1423 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1425 struct netiucv_priv *priv = netdev_priv(dev);
1427 IUCV_DBF_TEXT(trace, 5, __func__);
1428 return &priv->stats;
1432 * attributes in sysfs
1435 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1438 struct netiucv_priv *priv = dev_get_drvdata(dev);
1440 IUCV_DBF_TEXT(trace, 5, __func__);
1441 return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1444 static int netiucv_check_user(const char *buf, size_t count, char *username,
1450 p = strchr(buf, '.');
1451 if ((p && ((count > 26) ||
1453 (buf + count - p > 18))) ||
1454 (!p && (count > 9))) {
1455 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1459 for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1460 if (isalnum(*p) || *p == '$') {
1461 username[i] = toupper(*p);
1465 /* trailing lf, grr */
1467 IUCV_DBF_TEXT_(setup, 2,
1468 "conn_write: invalid character %02x\n", *p);
1472 username[i++] = ' ';
1477 for (i = 0; i < 16 && *p; i++, p++) {
1480 userdata[i] = toupper(*p);
1482 while (i > 0 && i < 16)
1483 userdata[i++] = ' ';
1485 memcpy(userdata, iucvMagic_ascii, 16);
1486 userdata[16] = '\0';
1487 ASCEBC(userdata, 16);
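/*
 * netiucv_check_user() thus accepts "USERID" or "USERID.userdata",
 * optionally followed by a newline: up to 8 alphanumeric or '$'
 * characters are uppercased and blank-padded into username[], anything
 * after the '.' is uppercased and blank-padded into userdata[]
 * (iucvMagic is used when no userdata is given), and the result is
 * converted to EBCDIC with ASCEBC().
 */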
1492 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1493 const char *buf, size_t count)
1495 struct netiucv_priv *priv = dev_get_drvdata(dev);
1496 struct net_device *ndev = priv->conn->netdev;
1500 struct iucv_connection *cp;
1502 IUCV_DBF_TEXT(trace, 3, __func__);
1503 rc = netiucv_check_user(buf, count, username, userdata);
1507 if (memcmp(username, priv->conn->userid, 9) &&
1508 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1509 /* username changed while the interface is active. */
1510 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1513 read_lock_bh(&iucv_connection_rwlock);
1514 list_for_each_entry(cp, &iucv_connection_list, list) {
1515 if (!strncmp(username, cp->userid, 9) &&
1516 !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
1517 read_unlock_bh(&iucv_connection_rwlock);
1518 IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1519 "already exists\n", netiucv_printuser(cp));
1523 read_unlock_bh(&iucv_connection_rwlock);
1524 memcpy(priv->conn->userid, username, 9);
1525 memcpy(priv->conn->userdata, userdata, 17);
1529 static DEVICE_ATTR(user, 0644, user_show, user_write);
1531 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1534 struct netiucv_priv *priv = dev_get_drvdata(dev);
1536 IUCV_DBF_TEXT(trace, 5, __func__);
1537 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1540 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1541 const char *buf, size_t count)
1543 struct netiucv_priv *priv = dev_get_drvdata(dev);
1544 struct net_device *ndev = priv->conn->netdev;
1548 IUCV_DBF_TEXT(trace, 3, __func__);
1552 rc = kstrtouint(buf, 0, &bs1);
1554 if (rc == -EINVAL) {
1555 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
1559 if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
1560 IUCV_DBF_TEXT_(setup, 2,
1561 "buffer_write: buffer size %d too large\n",
1565 if ((ndev->flags & IFF_RUNNING) &&
1566 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1567 IUCV_DBF_TEXT_(setup, 2,
1568 "buffer_write: buffer size %d too small\n",
1572 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1573 IUCV_DBF_TEXT_(setup, 2,
1574 "buffer_write: buffer size %d too small\n",
1579 priv->conn->max_buffsize = bs1;
1580 if (!(ndev->flags & IFF_RUNNING))
1581 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1587 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
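/*
 * Worked example for the checks in buffer_write() above (assuming the
 * two-byte ll_header): the smallest accepted buffer size is
 * 576 + 2 * NETIUCV_HDRLEN = 580 bytes, the largest is
 * NETIUCV_BUFSIZE_MAX = 65537.  While the interface is down, writing
 * e.g. 32768 also sets the MTU to 32768 - 2 * NETIUCV_HDRLEN = 32764.
 */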
1589 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1592 struct netiucv_priv *priv = dev_get_drvdata(dev);
1594 IUCV_DBF_TEXT(trace, 5, __func__);
1595 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1598 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1600 static ssize_t conn_fsm_show (struct device *dev,
1601 struct device_attribute *attr, char *buf)
1603 struct netiucv_priv *priv = dev_get_drvdata(dev);
1605 IUCV_DBF_TEXT(trace, 5, __func__);
1606 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1609 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1611 static ssize_t maxmulti_show (struct device *dev,
1612 struct device_attribute *attr, char *buf)
1614 struct netiucv_priv *priv = dev_get_drvdata(dev);
1616 IUCV_DBF_TEXT(trace, 5, __func__);
1617 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1620 static ssize_t maxmulti_write (struct device *dev,
1621 struct device_attribute *attr,
1622 const char *buf, size_t count)
1624 struct netiucv_priv *priv = dev_get_drvdata(dev);
1626 IUCV_DBF_TEXT(trace, 4, __func__);
1627 priv->conn->prof.maxmulti = 0;
1631 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1633 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1636 struct netiucv_priv *priv = dev_get_drvdata(dev);
1638 IUCV_DBF_TEXT(trace, 5, __func__);
1639 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1642 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1643 const char *buf, size_t count)
1645 struct netiucv_priv *priv = dev_get_drvdata(dev);
1647 IUCV_DBF_TEXT(trace, 4, __func__);
1648 priv->conn->prof.maxcqueue = 0;
1652 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1654 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1657 struct netiucv_priv *priv = dev_get_drvdata(dev);
1659 IUCV_DBF_TEXT(trace, 5, __func__);
1660 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1663 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1664 const char *buf, size_t count)
1666 struct netiucv_priv *priv = dev_get_drvdata(dev);
1668 IUCV_DBF_TEXT(trace, 4, __func__);
1669 priv->conn->prof.doios_single = 0;
1673 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1675 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1678 struct netiucv_priv *priv = dev_get_drvdata(dev);
1680 IUCV_DBF_TEXT(trace, 5, __func__);
1681 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1684 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1685 const char *buf, size_t count)
1687 struct netiucv_priv *priv = dev_get_drvdata(dev);
1689 IUCV_DBF_TEXT(trace, 5, __func__);
1690 priv->conn->prof.doios_multi = 0;
1694 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1696 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1699 struct netiucv_priv *priv = dev_get_drvdata(dev);
1701 IUCV_DBF_TEXT(trace, 5, __func__);
1702 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1705 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1706 const char *buf, size_t count)
1708 struct netiucv_priv *priv = dev_get_drvdata(dev);
1710 IUCV_DBF_TEXT(trace, 4, __func__);
1711 priv->conn->prof.txlen = 0;
1715 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1717 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1720 struct netiucv_priv *priv = dev_get_drvdata(dev);
1722 IUCV_DBF_TEXT(trace, 5, __func__);
1723 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1726 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1727 const char *buf, size_t count)
1729 struct netiucv_priv *priv = dev_get_drvdata(dev);
1731 IUCV_DBF_TEXT(trace, 4, __func__);
1732 priv->conn->prof.tx_time = 0;
1736 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1738 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1741 struct netiucv_priv *priv = dev_get_drvdata(dev);
1743 IUCV_DBF_TEXT(trace, 5, __func__);
1744 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1747 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1748 const char *buf, size_t count)
1750 struct netiucv_priv *priv = dev_get_drvdata(dev);
1752 IUCV_DBF_TEXT(trace, 4, __func__);
1753 priv->conn->prof.tx_pending = 0;
1757 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1759 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1762 struct netiucv_priv *priv = dev_get_drvdata(dev);
1764 IUCV_DBF_TEXT(trace, 5, __func__);
1765 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1768 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1769 const char *buf, size_t count)
1771 struct netiucv_priv *priv = dev_get_drvdata(dev);
1773 IUCV_DBF_TEXT(trace, 4, __func__);
1774 priv->conn->prof.tx_max_pending = 0;
1778 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1780 static struct attribute *netiucv_attrs[] = {
1781 &dev_attr_buffer.attr,
1782 &dev_attr_user.attr,
1786 static struct attribute_group netiucv_attr_group = {
1787 .attrs = netiucv_attrs,
1790 static struct attribute *netiucv_stat_attrs[] = {
1791 &dev_attr_device_fsm_state.attr,
1792 &dev_attr_connection_fsm_state.attr,
1793 &dev_attr_max_tx_buffer_used.attr,
1794 &dev_attr_max_chained_skbs.attr,
1795 &dev_attr_tx_single_write_ops.attr,
1796 &dev_attr_tx_multi_write_ops.attr,
1797 &dev_attr_netto_bytes.attr,
1798 &dev_attr_max_tx_io_time.attr,
1799 &dev_attr_tx_pending.attr,
1800 &dev_attr_tx_max_pending.attr,
1804 static struct attribute_group netiucv_stat_attr_group = {
1806 .attrs = netiucv_stat_attrs,
1809 static const struct attribute_group *netiucv_attr_groups[] = {
1810 &netiucv_stat_attr_group,
1811 &netiucv_attr_group,
1815 static int netiucv_register_device(struct net_device *ndev)
1817 struct netiucv_priv *priv = netdev_priv(ndev);
1818 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1821 IUCV_DBF_TEXT(trace, 3, __func__);
1824 dev_set_name(dev, "net%s", ndev->name);
1825 dev->bus = &iucv_bus;
1826 dev->parent = iucv_root;
1827 dev->groups = netiucv_attr_groups;
1829 * The release function could be called after the
1830 * module has been unloaded. Its _only_ task is to
1831 * free the struct. Therefore, we specify kfree()
1832 * directly here. (Probably a little bit obfuscated,
1833 * but legitimate ...).
1835 dev->release = (void (*)(struct device *))kfree;
1836 dev->driver = &netiucv_driver;
1840 ret = device_register(dev);
1846 dev_set_drvdata(dev, priv);
1850 static void netiucv_unregister_device(struct device *dev)
1852 IUCV_DBF_TEXT(trace, 3, __func__);
1853 device_unregister(dev);
1857 * Allocate and initialize a new connection structure.
1858 * Add it to the list of netiucv connections.
1860 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1864 struct iucv_connection *conn;
1866 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1869 skb_queue_head_init(&conn->collect_queue);
1870 skb_queue_head_init(&conn->commit_queue);
1871 spin_lock_init(&conn->collect_lock);
1872 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1875 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1878 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1881 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1882 conn_event_names, NR_CONN_STATES,
1883 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1888 fsm_settimer(conn->fsm, &conn->timer);
1889 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1892 memcpy(conn->userdata, userdata, 17);
1894 memcpy(conn->userid, username, 9);
1895 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1898 write_lock_bh(&iucv_connection_rwlock);
1899 list_add_tail(&conn->list, &iucv_connection_list);
1900 write_unlock_bh(&iucv_connection_rwlock);
1904 kfree_skb(conn->tx_buff);
1906 kfree_skb(conn->rx_buff);
1914 * Release a connection structure and remove it from the
1915 * list of netiucv connections.
1917 static void netiucv_remove_connection(struct iucv_connection *conn)
1920 IUCV_DBF_TEXT(trace, 3, __func__);
1921 write_lock_bh(&iucv_connection_rwlock);
1922 list_del_init(&conn->list);
1923 write_unlock_bh(&iucv_connection_rwlock);
1924 fsm_deltimer(&conn->timer);
1925 netiucv_purge_skb_queue(&conn->collect_queue);
1927 iucv_path_sever(conn->path, conn->userdata);
1931 netiucv_purge_skb_queue(&conn->commit_queue);
1932 kfree_fsm(conn->fsm);
1933 kfree_skb(conn->rx_buff);
1934 kfree_skb(conn->tx_buff);
1938 * Release all resources of a net device.
1940 static void netiucv_free_netdevice(struct net_device *dev)
1942 struct netiucv_priv *privptr = netdev_priv(dev);
1944 IUCV_DBF_TEXT(trace, 3, __func__);
1951 netiucv_remove_connection(privptr->conn);
1953 kfree_fsm(privptr->fsm);
1954 privptr->conn = NULL; privptr->fsm = NULL;
1955 /* privptr gets freed by free_netdev() */
1960 * Initialize a net device. (Called from kernel in alloc_netdev())
1962 static const struct net_device_ops netiucv_netdev_ops = {
1963 .ndo_open = netiucv_open,
1964 .ndo_stop = netiucv_close,
1965 .ndo_get_stats = netiucv_stats,
1966 .ndo_start_xmit = netiucv_tx,
1969 static void netiucv_setup_netdevice(struct net_device *dev)
1971 dev->mtu = NETIUCV_MTU_DEFAULT;
1973 dev->max_mtu = NETIUCV_MTU_MAX;
1974 dev->needs_free_netdev = true;
1975 dev->priv_destructor = netiucv_free_netdevice;
1976 dev->hard_header_len = NETIUCV_HDRLEN;
1978 dev->type = ARPHRD_SLIP;
1979 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1980 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1981 dev->netdev_ops = &netiucv_netdev_ops;
1985 * Allocate and initialize all parts of a net device.
1987 static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
1989 struct netiucv_priv *privptr;
1990 struct net_device *dev;
1992 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1993 NET_NAME_UNKNOWN, netiucv_setup_netdevice);
1997 if (dev_alloc_name(dev, dev->name) < 0)
2000 privptr = netdev_priv(dev);
2001 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
2002 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2003 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2007 privptr->conn = netiucv_new_connection(dev, username, userdata);
2008 if (!privptr->conn) {
2009 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
2012 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2016 kfree_fsm(privptr->fsm);
2023 static ssize_t conn_write(struct device_driver *drv,
2024 const char *buf, size_t count)
2029 struct net_device *dev;
2030 struct netiucv_priv *priv;
2031 struct iucv_connection *cp;
2033 IUCV_DBF_TEXT(trace, 3, __func__);
2034 rc = netiucv_check_user(buf, count, username, userdata);
2038 read_lock_bh(&iucv_connection_rwlock);
2039 list_for_each_entry(cp, &iucv_connection_list, list) {
2040 if (!strncmp(username, cp->userid, 9) &&
2041 !strncmp(userdata, cp->userdata, 17)) {
2042 read_unlock_bh(&iucv_connection_rwlock);
2043 IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
2044 "already exists\n", netiucv_printuser(cp));
2048 read_unlock_bh(&iucv_connection_rwlock);
2050 dev = netiucv_init_netdevice(username, userdata);
2052 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2056 rc = netiucv_register_device(dev);
2059 IUCV_DBF_TEXT_(setup, 2,
2060 "ret %d from netiucv_register_device\n", rc);
2065 priv = netdev_priv(dev);
2066 SET_NETDEV_DEV(dev, priv->dev);
2068 rc = register_netdevice(dev);
2073 dev_info(priv->dev, "The IUCV interface to %s has been established "
2075 netiucv_printuser(priv->conn));
2080 netiucv_unregister_device(priv->dev);
2082 netiucv_free_netdevice(dev);
2086 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2088 static ssize_t remove_write (struct device_driver *drv,
2089 const char *buf, size_t count)
2091 struct iucv_connection *cp;
2092 struct net_device *ndev;
2093 struct netiucv_priv *priv;
2095 char name[IFNAMSIZ];
2099 IUCV_DBF_TEXT(trace, 3, __func__);
2101 if (count >= IFNAMSIZ)
2102 count = IFNAMSIZ - 1;
2104 for (i = 0, p = buf; i < count && *p; i++, p++) {
2105 if (*p == '\n' || *p == ' ')
2106 /* trailing lf, grr */
2112 read_lock_bh(&iucv_connection_rwlock);
2113 list_for_each_entry(cp, &iucv_connection_list, list) {
2115 priv = netdev_priv(ndev);
2117 if (strncmp(name, ndev->name, count))
2119 read_unlock_bh(&iucv_connection_rwlock);
2120 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2121 dev_warn(dev, "The IUCV device is connected"
2122 " to %s and cannot be removed\n",
2123 priv->conn->userid);
2124 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2127 unregister_netdev(ndev);
2128 netiucv_unregister_device(dev);
2131 read_unlock_bh(&iucv_connection_rwlock);
2132 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2136 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
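/*
 * These two driver attributes form the administrative interface of the
 * driver: writing a z/VM user id (e.g. a hypothetical peer "TESTVM1") to
 * 'connection' makes conn_write() create a new iucvN interface, and
 * writing an interface name to 'remove' makes remove_write() tear it
 * down.  With the driver registered on the iucv bus below, the files
 * typically appear under /sys/bus/iucv/drivers/netiucv/.
 */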
2138 static struct attribute * netiucv_drv_attrs[] = {
2139 &driver_attr_connection.attr,
2140 &driver_attr_remove.attr,
2144 static struct attribute_group netiucv_drv_attr_group = {
2145 .attrs = netiucv_drv_attrs,
2148 static const struct attribute_group *netiucv_drv_attr_groups[] = {
2149 &netiucv_drv_attr_group,
2153 static void netiucv_banner(void)
2155 pr_info("driver initialized\n");
2158 static void __exit netiucv_exit(void)
2160 struct iucv_connection *cp;
2161 struct net_device *ndev;
2162 struct netiucv_priv *priv;
2165 IUCV_DBF_TEXT(trace, 3, __func__);
2166 while (!list_empty(&iucv_connection_list)) {
2167 cp = list_entry(iucv_connection_list.next,
2168 struct iucv_connection, list);
2170 priv = netdev_priv(ndev);
2173 unregister_netdev(ndev);
2174 netiucv_unregister_device(dev);
2177 device_unregister(netiucv_dev);
2178 driver_unregister(&netiucv_driver);
2179 iucv_unregister(&netiucv_handler, 1);
2180 iucv_unregister_dbf_views();
2182 pr_info("driver unloaded\n");
2186 static int __init netiucv_init(void)
2190 rc = iucv_register_dbf_views();
2193 rc = iucv_register(&netiucv_handler, 1);
2196 IUCV_DBF_TEXT(trace, 3, __func__);
2197 netiucv_driver.groups = netiucv_drv_attr_groups;
2198 rc = driver_register(&netiucv_driver);
2200 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2203 /* establish dummy device */
2204 netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2209 dev_set_name(netiucv_dev, "netiucv");
2210 netiucv_dev->bus = &iucv_bus;
2211 netiucv_dev->parent = iucv_root;
2212 netiucv_dev->release = (void (*)(struct device *))kfree;
2213 netiucv_dev->driver = &netiucv_driver;
2214 rc = device_register(netiucv_dev);
2216 put_device(netiucv_dev);
2223 driver_unregister(&netiucv_driver);
2225 iucv_unregister(&netiucv_handler, 1);
2227 iucv_unregister_dbf_views();
2232 module_init(netiucv_init);
2233 module_exit(netiucv_exit);
2234 MODULE_LICENSE("GPL");