3 * Copyright 1999 Digi International (www.digi.com)
4 * James Puzzo <jamesp at digi dot com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
13 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
14 * PURPOSE. See the GNU General Public License for more details.
26 * Handle the file operations required for the "network" devices.
27 * Includes those functions required to register the "net" devices
36 #include <linux/module.h>
37 #include <linux/proc_fs.h>
38 #include <linux/types.h>
39 #include <linux/string.h>
40 #include <linux/device.h>
41 #include <linux/tty.h>
42 #include <linux/tty_flip.h>
43 #include <linux/spinlock.h>
44 #include <linux/poll.h>
45 #include <linux/sched.h>
46 #include <linux/ratelimit.h>
47 #include <asm/unaligned.h>
49 #define MYFLIPLEN TBUF_MAX
51 #include "dgrp_common.h"
53 #define TTY_FLIPBUF_SIZE 512
54 #define DEVICE_NAME_SIZE 50
57 * Generic helper function declarations
/* parity_scan(): expands 0xFF escape sequences in received data into
 * per-character tty flag bytes; may shrink *len. Defined below.
 */
59 static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
60 unsigned char *fbuf, int *len);
63 * File operation declarations
/* Forward declarations so the fops table can be defined before the bodies. */
65 static int dgrp_net_open(struct inode *, struct file *);
66 static int dgrp_net_release(struct inode *, struct file *);
67 static ssize_t dgrp_net_read(struct file *, char __user *, size_t, loff_t *);
68 static ssize_t dgrp_net_write(struct file *, const char __user *, size_t,
70 static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
72 static unsigned int dgrp_net_select(struct file *file,
73 struct poll_table_struct *table);
/* File operations for the per-PortServer "net" character device; the
 * userspace daemon shuttles the RealPort protocol through these hooks.
 */
75 const struct file_operations dgrp_net_ops = {
77 .read = dgrp_net_read,
78 .write = dgrp_net_write,
79 .poll = dgrp_net_select,
80 .unlocked_ioctl = dgrp_net_ioctl,
81 .open = dgrp_net_open,
82 .release = dgrp_net_release,
86 * dgrp_dump() -- prints memory for debugging purposes.
87 * @mem: Memory location which should be printed to the console
88 * @len: Number of bytes to be dumped
90 static void dgrp_dump(u8 *mem, int len)
/* Emit each byte as two hex digits; output only appears when dynamic
 * debug (pr_debug) is enabled for this file.
 */
94 pr_debug("dgrp dump length = %d, data = ", len);
95 for (i = 0; i < len; ++i)
96 pr_debug("%.2x ", mem[i]);
101 * dgrp_read_data_block() -- Read a data block
102 * @ch: struct ch_struct *
104 * @flipbuf_size: size of flipbuf
106 static void dgrp_read_data_block(struct ch_struct *ch, u8 *flipbuf,
/* Nothing to copy into a zero/negative sized destination. */
112 if (flipbuf_size <= 0)
/* Contiguous bytes available before the circular receive buffer wraps. */
115 t = RBUF_MAX - ch->ch_rout;
/* First segment: from the read-out index up to the wrap point. */
119 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, t)/* sic */;
/* Second segment after the wrap. NOTE(review): this reads from
 * ch_rbuf + ch_rout again; presumably ch_rout is reset to 0 and
 * flipbuf advanced on elided lines between the two copies — confirm
 * against the full source.
 */
125 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, n);
132 * dgrp_input() -- send data to the line discipline
133 * @ch: pointer to channel struct
135 * Copies the rbuf to the flipbuf and sends to line discipline.
136 * Sends input buffer data to the line discipline.
139 static void dgrp_input(struct ch_struct *ch)
141 struct nd_struct *nd;
142 struct tty_struct *tty;
/* Serialize against other users of the node's shared flip buffers. */
158 spin_lock_irqsave(&nd->nd_lock, lock_flags);
160 myflipbuf = nd->nd_inputbuf;
161 myflipflagbuf = nd->nd_inputflagbuf;
/* If nobody has the port open, discard pending input by advancing
 * the read-out index to the write-in index.
 */
163 if (!ch->ch_open_count) {
164 ch->ch_rout = ch->ch_rin;
/* Same discard while the terminal unit is closing. */
168 if (ch->ch_tun.un_flag & UN_CLOSING) {
169 ch->ch_rout = ch->ch_rin;
173 tty = (ch->ch_tun).un_tty;
/* Sanity-check the tty before handing it data; discard otherwise. */
176 if (!tty || tty->magic != TTY_MAGIC) {
177 ch->ch_rout = ch->ch_rin;
181 tty_count = tty->count;
183 ch->ch_rout = ch->ch_rin;
187 if (tty->closing || test_bit(TTY_CLOSING, &tty->flags)) {
188 ch->ch_rout = ch->ch_rin;
/* Drop the lock for the (possibly sleeping) tty buffer operations. */
192 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
194 /* data_len should be the number of chars that we read in */
195 data_len = (ch->ch_rin - ch->ch_rout) & RBUF_MASK;
197 /* len is the amount of data we are going to transfer here */
198 len = tty_buffer_request_room(&ch->port, data_len);
200 /* Check DPA flow control */
201 if ((nd->nd_dpa_debug) &&
202 (nd->nd_dpa_flag & DPA_WAIT_SPACE) &&
203 (nd->nd_dpa_port == MINOR(tty_devnum(ch->ch_tun.un_tty))))
/* Only push data if the tty accepted room and receive isn't paused. */
206 if ((len) && !(ch->ch_flag & CH_RXSTOP)) {
208 dgrp_read_data_block(ch, myflipbuf, len);
/* Expand 0xFF escapes into per-char flags only when the line
 * discipline cares about parity/break/error marking; otherwise
 * mark everything TTY_NORMAL.
 */
210 if (I_PARMRK(tty) || I_BRKINT(tty) || I_INPCK(tty))
211 parity_scan(ch, myflipbuf, myflipflagbuf, &len);
213 memset(myflipflagbuf, TTY_NORMAL, len);
/* Mirror the data to the DPA debug monitor when it watches this port. */
215 if ((nd->nd_dpa_debug) &&
216 (nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(tty)))))
217 dgrp_dpa_data(nd, 1, myflipbuf, len);
219 tty_insert_flip_string_flags(&ch->port, myflipbuf,
221 tty_flip_buffer_push(&ch->port);
223 ch->ch_rxcount += len;
227 * Wake up any sleepers (maybe dgrp close) that might be waiting
228 * for a channel flag state change.
230 wake_up_interruptible(&ch->ch_flag_wait);
234 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
241 * Loop to inspect each single character or 0xFF escape.
243 * if PARMRK & ~DOSMODE:
244 * 0xFF 0xFF Normal 0xFF character, escaped
245 * to eliminate confusion.
246 * 0xFF 0x00 0x00 Break
247 * 0xFF 0x00 CC Error character CC.
248 * CC Normal character CC.
250 * if PARMRK & DOSMODE:
251 * 0xFF 0x18 0x00 Break
252 * 0xFF 0x08 0x00 Framing Error
253 * 0xFF 0x04 0x00 Parity error
254 * 0xFF 0x0C 0x00 Both Framing and Parity error
256 * TODO: do we need to do the XMODEM, XOFF, XON, XANY processing??
259 static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
260 unsigned char *fbuf, int *len)
/* DOS is 1 when the channel operates in DOSMODE escape encoding. */
264 int DOS = ((ch->ch_iflag & IF_DOSMODE) == 0 ? 0 : 1);
265 unsigned char *cout; /* character buffer */
266 unsigned char *fout; /* flag buffer */
/* Small state machine: state 0 = normal, 1 = saw 0xFF,
 * 2 = saw 0xFF plus the status byte; see the table in the
 * comment block above this function.
 */
278 switch (ch->ch_pscan_state) {
280 /* reset to sanity and fall through */
281 ch->ch_pscan_state = 0 ;
/* State 0: a bare 0xFF starts an escape and is consumed. */
285 if (c == 0xff) /* delete this character from stream */
286 ch->ch_pscan_state = 1;
289 *fout++ = TTY_NORMAL;
297 /* doubled ff, transform to single ff */
299 *fout++ = TTY_NORMAL;
301 ch->ch_pscan_state = 0;
303 /* save value examination in next state */
304 ch->ch_pscan_savechar = c;
305 ch->ch_pscan_state = 2;
310 /* third character of ff sequence */
/* DOSMODE status bits: 0x10 = break, 0x08 = framing error,
 * anything else is reported as a parity error.
 */
313 if (ch->ch_pscan_savechar & 0x10)
315 else if (ch->ch_pscan_savechar & 0x08)
319 * either marked as a parity error,
320 * indeterminate, or not in DOSMODE
321 * call it a parity error
323 *fout++ = TTY_PARITY;
325 /* case FF XX ?? where XX is not 00 */
326 if (ch->ch_pscan_savechar & 0xff) {
327 /* this should not happen */
328 pr_info("%s: parity_scan: error unexpected byte\n",
330 *fout++ = TTY_PARITY;
332 /* case FF 00 XX where XX is not 00 */
334 *fout++ = TTY_PARITY;
/* Escape fully consumed; return to normal scanning. */
341 ch->ch_pscan_state = 0;
349 * dgrp_net_idle() -- Idle the network connection
350 * @nd: pointer to node structure to idle
352 static void dgrp_net_idle(struct nd_struct *nd)
354 struct ch_struct *ch;
359 nd->nd_state = NS_IDLE;
/* Walk the outstanding sequence window and wake every waiter so no
 * thread stays blocked on a response that will never arrive.
 */
362 for (i = nd->nd_seq_out; ; i = (i + 1) & SEQ_MASK) {
/* NOTE(review): waking when nd_seq_wait[i] is already zero (and then
 * re-zeroing it) looks inverted — one would expect to wake when a
 * waiter count is nonzero. Lines are elided here; confirm against
 * the full source before changing anything.
 */
363 if (!nd->nd_seq_wait[i]) {
364 nd->nd_seq_wait[i] = 0;
365 wake_up_interruptible(&nd->nd_seq_wque[i]);
/* Stop once we've covered the whole in-flight window. */
368 if (i == nd->nd_seq_in)
372 nd->nd_seq_out = nd->nd_seq_in;
/* Reset module-select bookkeeping for the tx/rx packet streams. */
377 nd->nd_tx_module = 0x10;
378 nd->nd_rx_module = 0x00;
/* Force every channel back to the idle state. */
380 for (i = 0, ch = nd->nd_chan; i < CHAN_MAX; i++, ch++) {
381 ch->ch_state = CS_IDLE;
384 ch->ch_otype_waiting = 0;
389 * Increase the number of channels, waking up any
390 * threads that might be waiting for the channels
393 static void increase_channel_count(struct nd_struct *nd, int n)
395 struct ch_struct *ch;
396 struct device *classp;
397 char name[DEVICE_NAME_SIZE];
/* Bring channels nd_chan_count..n-1 online one at a time. */
402 for (i = nd->nd_chan_count; i < n; ++i) {
403 ch = nd->nd_chan + i;
405 /* FIXME: return a useful error instead! */
/* Transmit buffer for this channel. */
406 buf = kmalloc(TBUF_MAX, GFP_KERNEL);
411 pr_info_ratelimited("%s - ch_tbuf was not NULL\n",
/* Receive buffer for this channel. */
416 buf = kmalloc(RBUF_MAX, GFP_KERNEL);
421 pr_info("%s - ch_rbuf was not NULL\n",
/* Register the tty device node and its sysfs presence. */
425 classp = tty_port_register_device(&ch->port,
426 nd->nd_serial_ttdriver, i,
429 ch->ch_tun.un_sysfs = classp;
430 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
432 dgrp_create_tty_sysfs(&ch->ch_tun, classp);
433 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
434 &classp->kobj, name);
436 /* NOTE: We don't support "cu" devices anymore,
437 * so you will notice we don't register them
/* Optionally register the transparent-print ("pr") device. */
439 if (dgrp_register_prdevices) {
440 classp = tty_register_device(nd->nd_xprint_ttdriver,
442 ch->ch_pun.un_sysfs = classp;
443 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
445 dgrp_create_tty_sysfs(&ch->ch_pun, classp);
446 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
447 &classp->kobj, name);
/* Publish the new count and wake anyone waiting for this channel. */
450 nd->nd_chan_count = i + 1;
451 wake_up_interruptible(&ch->ch_flag_wait);
456 * Decrease the number of channels, and wake up any threads that might
457 * be waiting on the channels that vanished.
459 static void decrease_channel_count(struct nd_struct *nd, int n)
461 struct ch_struct *ch;
462 char name[DEVICE_NAME_SIZE];
/* Tear channels down from the top, nd_chan_count-1 down to n. */
465 for (i = nd->nd_chan_count - 1; i >= n; --i) {
466 ch = nd->nd_chan + i;
469 * Make any open ports inoperative.
471 ch->ch_state = CS_IDLE;
474 ch->ch_otype_waiting = 0;
477 * Only "HANGUP" if we care about carrier
478 * transitions and we are already open.
480 if (ch->ch_open_count != 0) {
481 ch->ch_flag |= CH_HANGUP;
486 * Unlike the CH_HANGUP flag above, use another
487 * flag to indicate to the RealPort state machine
488 * that this port has disappeared.
490 if (ch->ch_open_count != 0)
491 ch->ch_flag |= CH_PORT_GONE;
/* Let sleepers on this channel notice the flag changes. */
493 wake_up_interruptible(&ch->ch_flag_wait);
495 nd->nd_chan_count = i;
/* NOTE(review): nd_chan_count is assigned twice here; lines are
 * elided between the two stores — likely distinct paths in the
 * full source. Confirm before simplifying.
 */
503 nd->nd_chan_count = i;
/* Remove sysfs entries and unregister the tty device node. */
505 dgrp_remove_tty_sysfs(ch->ch_tun.un_sysfs);
506 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
507 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
508 tty_unregister_device(nd->nd_serial_ttdriver, i);
511 * NOTE: We don't support "cu" devices anymore, so don't
512 * unregister them here anymore.
/* Mirror teardown for the transparent-print device, if registered. */
515 if (dgrp_register_prdevices) {
516 dgrp_remove_tty_sysfs(ch->ch_pun.un_sysfs);
517 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
518 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
519 tty_unregister_device(nd->nd_xprint_ttdriver, i);
525 * dgrp_chan_count() -- Adjust the node channel count.
526 * @nd: pointer to a node structure
527 * @n: new value for channel count
529 * Adjusts the node channel count. If new ports have appeared, it tries
530 * to signal those processes that might have been waiting for ports to
531 * appear. If ports have disappeared it tries to signal those processes
532 * that might be hung waiting for a response for the now non-existent port.
534 static void dgrp_chan_count(struct nd_struct *nd, int n)
/* No-op when the count is unchanged. */
536 if (n == nd->nd_chan_count)
/* Grow or shrink via the dedicated helpers; exactly one runs. */
539 if (n > nd->nd_chan_count)
540 increase_channel_count(nd, n);
542 if (n < nd->nd_chan_count)
543 decrease_channel_count(nd, n);
547 * dgrp_monitor() -- send data to the device monitor queue
548 * @nd: pointer to a node structure
549 * @buf: data to copy to the monitoring buffer
550 * @len: number of bytes to transfer to the buffer
552 * Called by the net device routines to send data to the device
553 * monitor queue. If the device monitor buffer is too full to
554 * accept the data, it waits until the buffer is ready.
556 static void dgrp_monitor(struct nd_struct *nd, u8 *buf, int len)
/* Exclusive access to the monitor ring buffer. */
565 down(&nd->nd_mon_semaphore);
568 * Loop while data remains.
/* Also bail out if the monitor buffer disappears (monitor closed). */
570 while ((len > 0) && (nd->nd_mon_buf)) {
572 * Determine the amount of available space left in the
573 * buffer. If there's none, wait until some appears.
/* Circular-buffer free space; the -1 keeps in != out when full. */
576 n = (nd->nd_mon_out - nd->nd_mon_in - 1) & MON_MASK;
579 nd->nd_mon_flag |= MON_WAIT_SPACE;
/* Drop the semaphore while sleeping so the reader can drain. */
581 up(&nd->nd_mon_semaphore);
584 * Go to sleep waiting until the condition becomes true.
586 rtn = wait_event_interruptible(nd->nd_mon_wqueue,
587 ((nd->nd_mon_flag & MON_WAIT_SPACE) == 0));
589 /* FIXME: really ignore rtn? */
592 * We can't exit here if we receive a signal, since
593 * to do so would trash the debug stream.
596 down(&nd->nd_mon_semaphore);
602 * Copy as much data as will fit.
/* First segment: up to the physical end of the ring. */
608 r = MON_MAX - nd->nd_mon_in;
611 memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, r);
/* Second segment after the wrap. NOTE(review): the in-index
 * reset/advance happens on elided lines between these copies.
 */
621 memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, n);
/* Invariant check: the in-index must stay inside the ring. */
628 if (nd->nd_mon_in >= MON_MAX)
629 pr_info_ratelimited("%s - nd_mon_in (%i) >= MON_MAX\n",
630 __func__, nd->nd_mon_in);
633 * Wakeup any thread waiting for data
636 if (nd->nd_mon_flag & MON_WAIT_DATA) {
637 nd->nd_mon_flag &= ~MON_WAIT_DATA;
638 wake_up_interruptible(&nd->nd_mon_wqueue);
643 * Release the monitor lock.
645 up(&nd->nd_mon_semaphore);
649 * dgrp_encode_time() -- Encodes rpdump time into a 4-byte quantity.
650 * @nd: pointer to a node structure
651 * @buf: destination buffer
653 * Encodes "rpdump" time into a 4-byte quantity. Time is measured since
656 static void dgrp_encode_time(struct nd_struct *nd, u8 *buf)
661 * Convert time in HZ since open to time in milliseconds
/* Elapsed jiffies since the monitor stream was opened. */
664 t = jiffies - nd->nd_mon_lbolt;
/* Split whole seconds from the remainder to avoid overflowing the
 * multiplication by 1000 for large jiffy counts.
 */
665 t = 1000 * (t / HZ) + 1000 * (t % HZ) / HZ;
/* Store as a 4-byte big-endian quantity, truncated to 32 bits. */
667 put_unaligned_be32((uint)(t & 0xffffffff), buf);
673 * dgrp_monitor_message() -- Builds a rpdump style message.
674 * @nd: pointer to a node structure
675 * @message: destination buffer
677 static void dgrp_monitor_message(struct nd_struct *nd, char *message)
/* rpdump record: 1-byte type, 4-byte timestamp, 2-byte payload length. */
682 header[0] = RPDUMP_MESSAGE;
684 dgrp_encode_time(nd, header + 1);
688 put_unaligned_be16(n, header + 5);
/* Emit the header, then the message text, into the monitor stream. */
690 dgrp_monitor(nd, header, sizeof(header));
691 dgrp_monitor(nd, (u8 *) message, n);
697 * dgrp_monitor_reset() -- Note a reset in the monitoring buffer.
698 * @nd: pointer to a node structure
700 static void dgrp_monitor_reset(struct nd_struct *nd)
/* Timestamped RESET record, no payload. */
704 header[0] = RPDUMP_RESET;
706 dgrp_encode_time(nd, header + 1);
708 dgrp_monitor(nd, header, sizeof(header));
712 * dgrp_monitor_data() -- builds a monitor data packet
713 * @nd: pointer to a node structure
714 * @type: type of message to be logged
715 * @buf: data to be logged
716 * @size: number of bytes in the buffer
718 static void dgrp_monitor_data(struct nd_struct *nd, u8 type, u8 *buf, int size)
/* Same record layout as dgrp_monitor_message: type byte (set on an
 * elided line from @type), 4-byte timestamp, 2-byte size, then data.
 */
724 dgrp_encode_time(nd, header + 1);
726 put_unaligned_be16(size, header + 5);
728 dgrp_monitor(nd, header, sizeof(header));
729 dgrp_monitor(nd, buf, size);
732 static int alloc_nd_buffers(struct nd_struct *nd)
/* Start from a clean slate so the error path can kfree() safely
 * (kfree(NULL) is a no-op).
 */
736 nd->nd_writebuf = NULL;
737 nd->nd_inputbuf = NULL;
738 nd->nd_inputflagbuf = NULL;
741 * Allocate the network read/write buffer.
743 nd->nd_iobuf = kzalloc(UIO_MAX + 10, GFP_KERNEL);
748 * Allocate a buffer for doing the copy from user space to
749 * kernel space in the write routines.
751 nd->nd_writebuf = kzalloc(WRITEBUFLEN, GFP_KERNEL);
752 if (!nd->nd_writebuf)
756 * Allocate a buffer for doing the copy from kernel space to
757 * tty buffer space in the read routines.
759 nd->nd_inputbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
760 if (!nd->nd_inputbuf)
764 * Allocate a buffer for doing the copy from kernel space to
765 * tty buffer space in the read routines.
767 nd->nd_inputflagbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
768 if (!nd->nd_inputflagbuf)
/* Common error path: release everything allocated so far. */
775 kfree(nd->nd_writebuf);
776 kfree(nd->nd_inputbuf);
777 kfree(nd->nd_inputflagbuf);
782 * dgrp_net_open() -- Open the NET device for a particular PortServer
784 static int dgrp_net_open(struct inode *inode, struct file *file)
786 struct nd_struct *nd;
787 struct proc_dir_entry *de;
/* Pin the module for the lifetime of this open file. */
791 rtn = try_module_get(THIS_MODULE);
/* Only privileged daemons may drive the net device. */
795 if (!capable(CAP_SYS_ADMIN)) {
801 * Make sure that the "private_data" field hasn't already been used.
803 if (file->private_data) {
809 * Get the node pointer, and fail if it doesn't exist.
817 nd = (struct nd_struct *) de->data;
823 file->private_data = (void *) nd;
/* Serialize open/close against other net-device operations. */
828 down(&nd->nd_net_semaphore);
/* Only one daemon connection per node at a time. */
830 if (nd->nd_state != NS_CLOSED) {
836 * Initialize the link speed parameters.
839 nd->nd_link.lk_fast_rate = UIO_MAX;
840 nd->nd_link.lk_slow_rate = UIO_MAX;
842 nd->nd_link.lk_fast_delay = 1000;
843 nd->nd_link.lk_slow_delay = 1000;
845 nd->nd_link.lk_header_size = 46;
848 rtn = alloc_nd_buffers(nd);
853 * The port is now open, so move it to the IDLE state
857 nd->nd_tx_time = jiffies;
860 * If the polling routing is not running, start it running here
862 spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);
864 if (!dgrp_poll_data.node_active_count) {
865 dgrp_poll_data.node_active_count = 2;
866 dgrp_poll_data.timer.expires = jiffies +
867 dgrp_poll_tick * HZ / 1000;
868 add_timer(&dgrp_poll_data.timer);
871 spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);
/* Record the event in the rpdump monitor stream. */
873 dgrp_monitor_message(nd, "Net Open");
877 * Release the NET lock.
879 up(&nd->nd_net_semaphore);
/* Error path: balance the try_module_get() above. */
883 module_put(THIS_MODULE);
888 /* dgrp_net_release() -- close the NET device for a particular PortServer */
889 static int dgrp_net_release(struct inode *inode, struct file *file)
891 struct nd_struct *nd;
894 nd = (struct nd_struct *)(file->private_data);
898 /* TODO : historical locking placeholder */
900 * In the HPUX version of the RealPort driver (which served as a basis
901 * for this driver) this locking code was used. Saved if ever we need
902 * to review the locking under Linux.
904 /* spinlock(&nd->nd_lock); */
/* Serialize against dgrp_net_open() and other net operations. */
910 down(&nd->nd_net_semaphore);
913 * Before "closing" the internal connection, make sure all
918 nd->nd_state = NS_CLOSED;
922 * TODO ... must the wait queue be reset on close?
923 * should any pending waiters be reset?
924 * Let's decide to assert that the waitq is empty... and see
927 if (waitqueue_active(&nd->nd_tx_waitq))
928 pr_info("%s - expected waitqueue_active to be false\n",
936 /* TODO : historical locking placeholder */
938 * In the HPUX version of the RealPort driver (which served as a basis
939 * for this driver) this locking code was used. Saved if ever we need
940 * to review the locking under Linux.
942 /* spinunlock( &nd->nd_lock ); */
/* Free the buffers allocated in alloc_nd_buffers(); NULL the
 * pointers so a future open starts clean.
 */
945 kfree(nd->nd_writebuf);
946 nd->nd_writebuf = NULL;
948 kfree(nd->nd_inputbuf);
949 nd->nd_inputbuf = NULL;
951 kfree(nd->nd_inputflagbuf);
952 nd->nd_inputflagbuf = NULL;
954 /* TODO : historical locking placeholder */
956 * In the HPUX version of the RealPort driver (which served as a basis
957 * for this driver) this locking code was used. Saved if ever we need
958 * to review the locking under Linux.
960 /* spinlock(&nd->nd_lock); */
963 * Set the active port count to zero.
965 dgrp_chan_count(nd, 0);
967 /* TODO : historical locking placeholder */
969 * In the HPUX version of the RealPort driver (which served as a basis
970 * for this driver) this locking code was used. Saved if ever we need
971 * to review the locking under Linux.
973 /* spinunlock(&nd->nd_lock); */
976 * Release the NET lock.
978 up(&nd->nd_net_semaphore);
981 * Cause the poller to stop scheduling itself if this is
982 * the last active node.
984 spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);
986 if (dgrp_poll_data.node_active_count == 2) {
987 del_timer(&dgrp_poll_data.timer);
988 dgrp_poll_data.node_active_count = 0;
991 spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);
/* Re-take the lock just to log the close into the monitor stream. */
993 down(&nd->nd_net_semaphore);
995 dgrp_monitor_message(nd, "Net Close");
997 up(&nd->nd_net_semaphore);
/* Balance the try_module_get() from open and detach the node. */
1000 module_put(THIS_MODULE);
1001 file->private_data = NULL;
1005 /* used in dgrp_send to setup command header */
/* Writes the RealPort command header (0xb0 | port low nibble, then the
 * command byte on an elided line) and returns the advanced pointer so
 * callers can append command parameters.
 */
1006 static inline u8 *set_cmd_header(u8 *b, u8 port, u8 cmd)
1008 *b++ = 0xb0 + (port & 0x0f);
1014 * dgrp_send() -- build a packet for transmission to the server
1015 * @nd: pointer to a node structure
1016 * @tmax: maximum bytes to transmit
1018 * returns number of bytes sent
1020 static int dgrp_send(struct nd_struct *nd, long tmax)
1022 struct ch_struct *ch = nd->nd_chan;
1041 long wanted_sync_port = -1;
1042 ushort tdata[CHAN_MAX];
1045 mbuf = nd->nd_iobuf + UIO_BASE;
1048 send_sync = nd->nd_link.lk_slow_rate < UIO_MAX;
1053 memset(tdata, 0, sizeof(tdata));
1057 * If there are any outstanding requests to be serviced,
1058 * service them here.
1060 if (nd->nd_send & NR_PASSWORD) {
1063 * Send Password response.
1068 put_unaligned_be16(strlen(nd->password), b + 2);
1070 b += strlen(nd->password);
1071 nd->nd_send &= ~(NR_PASSWORD);
1076 * Loop over all modules to generate commands, and determine
1077 * the amount of data queued for transmit.
1080 for (mod = 0, port = 0; port < nd->nd_chan_count; mod++) {
1082 * If this is not the current module, enter a module select
1083 * code in the buffer.
1086 if (mod != nd->nd_tx_module)
1090 * Loop to process one module.
1093 maxport = port + 16;
1095 if (maxport > nd->nd_chan_count)
1096 maxport = nd->nd_chan_count;
1098 for (; port < maxport; port++, ch++) {
1100 * Switch based on channel state.
1103 switch (ch->ch_state) {
1105 * Send requests when the port is closed, and there
1106 * are no Open, Close or Cancel requests expected.
1111 * Wait until any open error code
1112 * has been delivered to all
1116 if (ch->ch_open_error) {
1117 if (ch->ch_wait_count[ch->ch_otype]) {
1122 ch->ch_open_error = 0;
1126 * Wait until the channel HANGUP flag is reset
1127 * before sending the first open. We can only
1128 * get to this state after a server disconnect.
1131 if ((ch->ch_flag & CH_HANGUP) != 0)
1135 * If recovering from a TCP disconnect, or if
1136 * there is an immediate open pending, send an
1137 * Immediate Open request.
1139 if ((ch->ch_flag & CH_PORT_GONE) ||
1140 ch->ch_wait_count[OTYPE_IMMEDIATE] != 0) {
1141 b = set_cmd_header(b, port, 10);
1144 ch->ch_state = CS_WAIT_OPEN;
1145 ch->ch_otype = OTYPE_IMMEDIATE;
1150 * If there is no Persistent or Incoming Open on the wait
1151 * list in the server, and a thread is waiting for a
1152 * Persistent or Incoming Open, send a Persistent or Incoming
1155 if (ch->ch_otype_waiting == 0) {
1156 if (ch->ch_wait_count[OTYPE_PERSISTENT] != 0) {
1157 b = set_cmd_header(b, port, 10);
1160 ch->ch_state = CS_WAIT_OPEN;
1161 ch->ch_otype = OTYPE_PERSISTENT;
1162 } else if (ch->ch_wait_count[OTYPE_INCOMING] != 0) {
1163 b = set_cmd_header(b, port, 10);
1166 ch->ch_state = CS_WAIT_OPEN;
1167 ch->ch_otype = OTYPE_INCOMING;
1173 * If a Persistent or Incoming Open is pending in
1174 * the server, but there is no longer an open
1175 * thread waiting for it, cancel the request.
1178 if (ch->ch_wait_count[ch->ch_otype_waiting] == 0) {
1179 b = set_cmd_header(b, port, 10);
1182 ch->ch_state = CS_WAIT_CANCEL;
1183 ch->ch_otype = ch->ch_otype_waiting;
1188 * Send port parameter queries.
1192 * Clear out all FEP state that might remain
1193 * from the last connection.
1196 ch->ch_flag |= CH_PARAM;
1198 ch->ch_flag &= ~CH_RX_FLUSH;
1234 /* Send Sequence Request */
1235 b = set_cmd_header(b, port, 14);
1237 /* Configure Event Conditions Packet */
1238 b = set_cmd_header(b, port, 42);
1239 put_unaligned_be16(0x02c0, b);
1241 *b++ = (DM_DTR | DM_RTS | DM_CTS |
1242 DM_DSR | DM_RI | DM_CD);
1244 /* Send Status Request */
1245 b = set_cmd_header(b, port, 16);
1247 /* Send Buffer Request */
1248 b = set_cmd_header(b, port, 20);
1250 /* Send Port Capability Request */
1251 b = set_cmd_header(b, port, 22);
1253 ch->ch_expect = (RR_SEQUENCE |
1258 ch->ch_state = CS_WAIT_QUERY;
1260 /* Raise modem signals */
1261 b = set_cmd_header(b, port, 44);
1263 if (ch->ch_flag & CH_PORT_GONE)
1264 ch->ch_s_mout = ch->ch_mout;
1266 ch->ch_s_mout = ch->ch_mout = DM_DTR | DM_RTS;
1269 *b++ = ch->ch_s_mflow = 0;
1270 *b++ = ch->ch_s_mctrl = ch->ch_mctrl = 0;
1272 if (ch->ch_flag & CH_PORT_GONE)
1273 ch->ch_flag &= ~CH_PORT_GONE;
1278 * Handle normal open and ready mode.
1284 * If the port is not open, and there are no
1285 * no longer any ports requesting an open,
1286 * then close the port.
1289 if (ch->ch_open_count == 0 &&
1290 ch->ch_wait_count[ch->ch_otype] == 0) {
1295 * Process waiting input.
1297 * If there is no one to read it, discard the data.
1299 * Otherwise if we are not in fastcook mode, or if there is a
1300 * fastcook thread waiting for data, send the data to the
1303 if (ch->ch_rin != ch->ch_rout) {
1304 if (ch->ch_tun.un_open_count == 0 ||
1305 (ch->ch_tun.un_flag & UN_CLOSING) ||
1306 (ch->ch_cflag & CF_CREAD) == 0) {
1307 ch->ch_rout = ch->ch_rin;
1308 } else if ((ch->ch_flag & CH_FAST_READ) == 0 ||
1309 ch->ch_inwait != 0) {
1312 if (ch->ch_rin != ch->ch_rout)
1318 * Handle receive flush, and changes to
1319 * server port parameters.
1322 if (ch->ch_flag & (CH_RX_FLUSH | CH_PARAM)) {
1324 * If we are in receive flush mode,
1325 * and enough data has gone by, reset
1326 * receive flush mode.
1328 if (ch->ch_flag & CH_RX_FLUSH) {
1329 if (((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >
1330 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK))
1331 ch->ch_flag &= ~CH_RX_FLUSH;
1340 if (ch->ch_s_tmax != ch->ch_tmax ||
1341 ch->ch_s_ttime != ch->ch_ttime) {
1342 b = set_cmd_header(b, port, 48);
1344 ch->ch_s_tmax = ch->ch_tmax;
1345 ch->ch_s_ttime = ch->ch_ttime;
1347 put_unaligned_be16(ch->ch_s_tmax,
1351 put_unaligned_be16(ch->ch_s_ttime,
1360 if (ch->ch_s_rlow != ch->ch_rlow ||
1361 ch->ch_s_rhigh != ch->ch_rhigh) {
1362 b = set_cmd_header(b, port, 45);
1364 ch->ch_s_rlow = ch->ch_rlow;
1365 ch->ch_s_rhigh = ch->ch_rhigh;
1367 put_unaligned_be16(ch->ch_s_rlow,
1371 put_unaligned_be16(ch->ch_s_rhigh,
1377 * Send BRATE, CFLAG, IFLAG,
1381 if (ch->ch_s_brate != ch->ch_brate ||
1382 ch->ch_s_cflag != ch->ch_cflag ||
1383 ch->ch_s_iflag != ch->ch_iflag ||
1384 ch->ch_s_oflag != ch->ch_oflag ||
1385 ch->ch_s_xflag != ch->ch_xflag) {
1386 b = set_cmd_header(b, port, 40);
1388 ch->ch_s_brate = ch->ch_brate;
1389 ch->ch_s_cflag = ch->ch_cflag;
1390 ch->ch_s_iflag = ch->ch_iflag;
1391 ch->ch_s_oflag = ch->ch_oflag;
1392 ch->ch_s_xflag = ch->ch_xflag;
1394 put_unaligned_be16(ch->ch_s_brate,
1398 put_unaligned_be16(ch->ch_s_cflag,
1402 put_unaligned_be16(ch->ch_s_iflag,
1406 put_unaligned_be16(ch->ch_s_oflag,
1410 put_unaligned_be16(ch->ch_s_xflag,
1416 * Send MOUT, MFLOW, MCTRL.
1419 if (ch->ch_s_mout != ch->ch_mout ||
1420 ch->ch_s_mflow != ch->ch_mflow ||
1421 ch->ch_s_mctrl != ch->ch_mctrl) {
1422 b = set_cmd_header(b, port, 44);
1424 *b++ = ch->ch_s_mout = ch->ch_mout;
1425 *b++ = ch->ch_s_mflow = ch->ch_mflow;
1426 *b++ = ch->ch_s_mctrl = ch->ch_mctrl;
1430 * Send Flow control characters.
1433 if (ch->ch_s_xon != ch->ch_xon ||
1434 ch->ch_s_xoff != ch->ch_xoff ||
1435 ch->ch_s_lnext != ch->ch_lnext ||
1436 ch->ch_s_xxon != ch->ch_xxon ||
1437 ch->ch_s_xxoff != ch->ch_xxoff) {
1438 b = set_cmd_header(b, port, 46);
1440 *b++ = ch->ch_s_xon = ch->ch_xon;
1441 *b++ = ch->ch_s_xoff = ch->ch_xoff;
1442 *b++ = ch->ch_s_lnext = ch->ch_lnext;
1443 *b++ = ch->ch_s_xxon = ch->ch_xxon;
1444 *b++ = ch->ch_s_xxoff = ch->ch_xxoff;
1451 if (ch->ch_s_rmax != ch->ch_rmax ||
1452 ch->ch_s_rtime != ch->ch_rtime) {
1453 b = set_cmd_header(b, port, 47);
1455 ch->ch_s_rmax = ch->ch_rmax;
1456 ch->ch_s_rtime = ch->ch_rtime;
1458 put_unaligned_be16(ch->ch_s_rmax,
1462 put_unaligned_be16(ch->ch_s_rtime,
1467 ch->ch_flag &= ~CH_PARAM;
1468 wake_up_interruptible(&ch->ch_flag_wait);
1473 * Handle action commands.
1476 if (ch->ch_send != 0) {
1477 /* int send = ch->ch_send & ~ch->ch_expect; */
1478 send = ch->ch_send & ~ch->ch_expect;
1480 /* Send character immediate */
1481 if ((send & RR_TX_ICHAR) != 0) {
1482 b = set_cmd_header(b, port, 60);
1485 ch->ch_expect |= RR_TX_ICHAR;
1489 if ((send & RR_TX_BREAK) != 0) {
1490 if (ch->ch_break_time != 0) {
1491 b = set_cmd_header(b, port, 61);
1492 put_unaligned_be16(ch->ch_break_time,
1496 ch->ch_expect |= RR_TX_BREAK;
1497 ch->ch_break_time = 0;
1499 ch->ch_send &= ~RR_TX_BREAK;
1500 ch->ch_flag &= ~CH_TX_BREAK;
1501 wake_up_interruptible(&ch->ch_flag_wait);
1506 * Flush input/output buffers.
1509 if ((send & (RR_RX_FLUSH | RR_TX_FLUSH)) != 0) {
1510 b = set_cmd_header(b, port, 62);
1512 *b++ = ((send & RR_TX_FLUSH) == 0 ? 1 :
1513 (send & RR_RX_FLUSH) == 0 ? 2 : 3);
1515 if (send & RR_RX_FLUSH) {
1516 ch->ch_flush_seq = nd->nd_seq_in;
1517 ch->ch_flag |= CH_RX_FLUSH;
1520 wanted_sync_port = port;
1523 ch->ch_send &= ~(RR_RX_FLUSH | RR_TX_FLUSH);
1526 /* Pause input/output */
1527 if ((send & (RR_RX_STOP | RR_TX_STOP)) != 0) {
1528 b = set_cmd_header(b, port, 63);
1531 if ((send & RR_TX_STOP) != 0)
1534 if ((send & RR_RX_STOP) != 0)
1539 ch->ch_send &= ~(RR_RX_STOP | RR_TX_STOP);
1542 /* Start input/output */
1543 if ((send & (RR_RX_START | RR_TX_START)) != 0) {
1544 b = set_cmd_header(b, port, 64);
1547 if ((send & RR_TX_START) != 0)
1548 *b |= EV_OPU | EV_OPS | EV_OPX;
1550 if ((send & RR_RX_START) != 0)
1551 *b |= EV_IPU | EV_IPS;
1555 ch->ch_send &= ~(RR_RX_START | RR_TX_START);
1561 * Send a window sequence to acknowledge received data.
1564 rwin = (ch->ch_s_rin +
1565 ((ch->ch_rout - ch->ch_rin - 1) & RBUF_MASK));
1567 n = (rwin - ch->ch_s_rwin) & 0xffff;
1569 if (n >= RBUF_MAX / 4) {
1570 b[0] = 0xa0 + (port & 0xf);
1571 ch->ch_s_rwin = rwin;
1572 put_unaligned_be16(rwin, b + 1);
1577 * If the terminal is waiting on LOW
1578 * water or EMPTY, and the condition
1579 * is now satisfied, call the line
1580 * discipline to put more data in the
1584 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1586 if ((ch->ch_tun.un_flag & (UN_EMPTY|UN_LOW)) != 0) {
1587 if ((ch->ch_tun.un_flag & UN_LOW) != 0 ?
1589 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin)) {
1590 ch->ch_tun.un_flag &= ~(UN_EMPTY|UN_LOW);
1592 if (waitqueue_active(&((ch->ch_tun.un_tty)->write_wait)))
1593 wake_up_interruptible(&((ch->ch_tun.un_tty)->write_wait));
1594 tty_wakeup(ch->ch_tun.un_tty);
1595 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1600 * If the printer is waiting on LOW
1601 * water, TIME, EMPTY or PWAIT, and is
1602 * now ready to put more data in the
1603 * buffer, call the line discipline to
1607 /* FIXME: jiffies - ch->ch_waketime can never
1608 be < 0. Someone needs to work out what is
1609 actually intended here */
1610 if (ch->ch_pun.un_open_count &&
1611 (ch->ch_pun.un_flag &
1612 (UN_EMPTY|UN_TIME|UN_LOW|UN_PWAIT)) != 0) {
1614 if ((ch->ch_pun.un_flag & UN_LOW) != 0 ?
1616 (ch->ch_pun.un_flag & UN_TIME) != 0 ?
1617 ((jiffies - ch->ch_waketime) >= 0) :
1618 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin) &&
1619 ((ch->ch_pun.un_flag & UN_EMPTY) != 0 ||
1620 ((ch->ch_tun.un_open_count &&
1621 ch->ch_tun.un_tty->ops->chars_in_buffer) ?
1622 (ch->ch_tun.un_tty->ops->chars_in_buffer)(ch->ch_tun.un_tty) == 0
1626 ch->ch_pun.un_flag &= ~(UN_EMPTY | UN_TIME | UN_LOW | UN_PWAIT);
1628 if (waitqueue_active(&((ch->ch_pun.un_tty)->write_wait)))
1629 wake_up_interruptible(&((ch->ch_pun.un_tty)->write_wait));
1630 tty_wakeup(ch->ch_pun.un_tty);
1631 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1633 } else if ((ch->ch_pun.un_flag & UN_TIME) != 0) {
1640 * Determine the max number of bytes
1641 * this port can send, including
1642 * packet header overhead.
1645 t = ((ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff);
1651 n += (n <= 8 ? 1 : n <= 255 ? 2 : 3);
1664 b = set_cmd_header(b, port, 10);
1665 if (ch->ch_otype == OTYPE_IMMEDIATE)
1670 ch->ch_state = CS_WAIT_CLOSE;
1674 * Wait for a previous server request.
1678 case CS_WAIT_CANCEL:
1685 pr_info("%s - unexpected channel state (%i)\n",
1686 __func__, ch->ch_state);
1691 * If a module select code is needed, drop one in. If space
1692 * was reserved for one, but none is needed, recover the space.
1695 if (mod != nd->nd_tx_module) {
1697 mbuf[-1] = 0xf0 | mod;
1698 nd->nd_tx_module = mod;
1706 * Adjust "tmax" so that under worst case conditions we do
1707 * not overflow either the daemon buffer or the internal
1708 * buffer in the loop that follows. Leave a safe area
1709 * of 64 bytes so we start getting asserts before we start
1710 * losing data or clobbering memory.
1713 n = UIO_MAX - UIO_BASE;
1723 * Allocate space for 5 Module Selects, 1 Sequence Request,
1724 * and 1 Set TREQ for each active channel.
1727 tmax -= 5 + 3 + 4 * nd->nd_chan_count;
1730 * Further reduce "tmax" to the available transmit credit.
1731 * Note that this is a soft constraint; The transmit credit
1732 * can go negative for a time and then recover.
1735 n = nd->nd_tx_deposit - nd->nd_tx_charge - nd->nd_link.lk_header_size;
1741 * Finally reduce tmax by the number of bytes already in
1748 * Suspend data transmit unless every ready channel can send
1749 * at least 1 character.
1751 if (tmax < 2 * nd->nd_chan_count) {
1754 } else if (tchan > 1 && ttotal > tmax) {
1757 * If transmit is limited by the credit budget, find the
1758 * largest number of characters we can send without driving
1759 * the credit negative.
1768 for (try = 0; try < 3; try++) {
1772 for (i = 0; i < tc; i++) {
1773 if (tsend < tdata[i])
1774 tdata[c++] = tdata[i];
1790 tsend = tm / nd->nd_chan_count;
1797 * If no budgetary constraints, or only one channel ready
1798 * to send, set the character limit to the remaining
1805 tsend -= (tsend <= 9) ? 1 : (tsend <= 257) ? 2 : 3;
1808 * Loop over all channels, sending queued data.
1815 for (mod = 0; port < nd->nd_chan_count; mod++) {
1817 * If this is not the current module, enter a module select
1818 * code in the buffer.
1821 if (mod != nd->nd_tx_module)
1825 * Loop to process one module.
1828 maxport = port + 16;
1830 if (maxport > nd->nd_chan_count)
1831 maxport = nd->nd_chan_count;
1833 for (; port < maxport; port++, ch++) {
1834 if (ch->ch_state != CS_READY)
1839 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1842 * If there is data that can be sent, send it.
1845 if (n != 0 && used_buffer > 0) {
1846 t = (ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff;
1856 if (n > used_buffer) {
1865 * Create the correct size transmit header,
1866 * depending on the amount of data to transmit.
1871 b[0] = ((n - 1) << 4) + (port & 0xf);
1874 } else if (n <= 255) {
1876 b[0] = 0x80 + (port & 0xf);
1882 b[0] = 0x90 + (port & 0xf);
1883 put_unaligned_be16(n, b + 1);
1887 ch->ch_s_tin = (ch->ch_s_tin + n) & 0xffff;
1890 * Copy transmit data to the packet.
1893 t = TBUF_MAX - ch->ch_tout;
1896 memcpy(b, ch->ch_tbuf + ch->ch_tout, t);
1903 memcpy(b, ch->ch_tbuf + ch->ch_tout, n);
1907 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1911 * Wake any terminal unit process waiting in the
1912 * dgrp_write routine for low water.
1918 if ((ch->ch_flag & CH_LOW) != 0) {
1919 ch->ch_flag &= ~CH_LOW;
1920 wake_up_interruptible(&ch->ch_flag_wait);
1923 /* selwakeup tty_sel */
1924 if (ch->ch_tun.un_open_count) {
1925 struct tty_struct *tty = (ch->ch_tun.un_tty);
1927 if (waitqueue_active(&tty->write_wait))
1928 wake_up_interruptible(&tty->write_wait);
1933 if (ch->ch_pun.un_open_count) {
1934 struct tty_struct *tty = (ch->ch_pun.un_tty);
1936 if (waitqueue_active(&tty->write_wait))
1937 wake_up_interruptible(&tty->write_wait);
1943 * Do EMPTY processing.
1949 if ((ch->ch_flag & (CH_EMPTY | CH_DRAIN)) != 0 ||
1950 (ch->ch_pun.un_flag & UN_EMPTY) != 0) {
1952 * If there is still data in the server, ask the server
1953 * to notify us when its all gone.
1956 if (ch->ch_s_treq != ch->ch_s_tin) {
1957 b = set_cmd_header(b, port, 43);
1959 ch->ch_s_treq = ch->ch_s_tin;
1960 put_unaligned_be16(ch->ch_s_treq,
1966 * If there is a thread waiting for buffer empty,
1967 * and we are truly empty, wake the thread.
1970 else if ((ch->ch_flag & CH_EMPTY) != 0 &&
1971 (ch->ch_send & RR_TX_BREAK) == 0) {
1972 ch->ch_flag &= ~CH_EMPTY;
1974 wake_up_interruptible(&ch->ch_flag_wait);
1980 * If a module select code is needed, drop one in. If space
1981 * was reserved for one, but none is needed, recover the space.
1984 if (mod != nd->nd_tx_module) {
1986 mbuf[-1] = 0xf0 | mod;
1987 nd->nd_tx_module = mod;
1995 * Send a synchronization sequence associated with the last open
1996 * channel that sent data, and remember the time when the data was
2002 if ((send_sync || nd->nd_seq_wait[in] != 0) && lastport >= 0) {
2006 * Attempt the use the port that really wanted the sync.
2007 * This gets around a race condition where the "lastport" is in
2008 * the middle of the close() routine, and by the time we
2009 * send this command, it will have already acked the close, and
2010 * thus not send the sync response.
2012 if (wanted_sync_port >= 0)
2013 lastport = wanted_sync_port;
2015 * Set a flag just in case the port is in the middle of a close,
2016 * it will not be permitted to actually close until we get an
2017 * sync response, and clear the flag there.
2019 ch = nd->nd_chan + lastport;
2020 ch->ch_flag |= CH_WAITING_SYNC;
2022 mod = lastport >> 4;
2024 if (mod != nd->nd_tx_module) {
2028 nd->nd_tx_module = mod;
2031 bb = set_cmd_header(bb, lastport, 12);
2034 nd->nd_seq_size[in] = bb - buf;
2035 nd->nd_seq_time[in] = jiffies;
2037 if (++in >= SEQ_MAX)
2040 if (in != nd->nd_seq_out) {
2043 nd->nd_unack += b - buf;
2048 * If there are no open ports, a sync cannot be sent.
2049 * There is nothing left to wait for anyway, so wake any
2050 * thread waiting for an acknowledgement.
2053 else if (nd->nd_seq_wait[in] != 0) {
2054 nd->nd_seq_wait[in] = 0;
2056 wake_up_interruptible(&nd->nd_seq_wque[in]);
2060 * If there is no traffic for an interval of IDLE_MAX, then
2061 * send a single byte packet.
2065 nd->nd_tx_time = jiffies;
2066 } else if ((ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX) {
2067 *b++ = 0xf0 | nd->nd_tx_module;
2068 nd->nd_tx_time = jiffies;
2074 pr_info("%s - n(%i) >= tsafe(%i)\n",
2075 __func__, n, tsafe);
2080 nd->nd_tx_work = work;
2087 * Data to be sent TO the PortServer from the "async." half of the driver.
/*
 * dgrp_net_read() -- produce outbound RealPort protocol data and copy it
 * to the network daemon's user-space buffer.
 *
 * NOTE(review): this view of the file is elided (line numbers jump);
 * the comments below describe only the visible statements.
 *
 * Visible behavior: serializes with the write path via nd_net_semaphore
 * (down here, up at the end), bumps nd_read_count, clears nd_tx_ready,
 * then builds output in nd_iobuf according to nd_state and copies it to
 * the caller with copy_to_user().  Accounting (nd_tx_byte/nd_tx_charge)
 * includes the link header size.
 */
2089 static ssize_t dgrp_net_read(struct file *file, char __user *buf, size_t count,
2092 struct nd_struct *nd;
2099 * Get the node pointer, and quit if it doesn't exist.
2101 nd = (struct nd_struct *)(file->private_data);
/* Reject reads smaller than the minimum protocol unit. */
2105 if (count < UIO_MIN)
2109 * Only one read/write operation may be in progress at
2114 * Grab the NET lock.
2116 down(&nd->nd_net_semaphore);
2118 nd->nd_read_count++;
/* A read consumes the pending-transmit condition used by poll(). */
2120 nd->nd_tx_ready = 0;
2123 * Determine the effective size of the buffer.
/* Leftover undecoded input above UIO_BASE is unexpected; log rate-limited. */
2126 if (nd->nd_remain > UIO_BASE)
2127 pr_info_ratelimited("%s - nd_remain(%i) > UIO_BASE\n",
2128 __func__, nd->nd_remain);
2130 b = local_buf = nd->nd_iobuf + UIO_BASE;
2133 * Generate data according to the node state.
2136 switch (nd->nd_state) {
2138 * Initialize the connection.
2143 dgrp_monitor_reset(nd);
2146 * Request a Product ID Packet.
/* Each query sets an NR_* bit in nd_expect; dgrp_receive() clears it
 * when the matching response arrives. */
2153 nd->nd_expect |= NR_IDENT;
2156 * Request a Server Capability ID Response.
2163 nd->nd_expect |= NR_CAPABILITY;
2166 * Request a Server VPD Response.
2173 nd->nd_expect |= NR_VPD;
2175 nd->nd_state = NS_WAIT_QUERY;
2179 * We do serious communication with the server only in
/* Steady state: delegate packet construction to dgrp_send(). */
2184 b = dgrp_send(nd, count) + local_buf;
2188 * Send off an error after receiving a bogus message
2193 n = strlen(nd->nd_error);
2197 memcpy(b + 2, nd->nd_error, n);
2202 * Set the active port count to zero.
2204 dgrp_chan_count(nd, 0);
2214 nd->nd_send_count++;
/* Charge transmit credit for payload plus per-packet header overhead. */
2216 nd->nd_tx_byte += n + nd->nd_link.lk_header_size;
2217 nd->nd_tx_charge += n + nd->nd_link.lk_header_size;
2220 rtn = copy_to_user((void __user *)buf, local_buf, n);
/* Mirror the outbound traffic to the monitor device, if open. */
2231 dgrp_monitor_data(nd, RPDUMP_CLIENT, local_buf, n);
2234 * Release the NET lock.
2237 up(&nd->nd_net_semaphore);
2243 * dgrp_receive() -- decode data packets received from the remote PortServer.
2244 * @nd: pointer to a node structure
2246 static void dgrp_receive(struct nd_struct *nd)
/*
 * Decode RealPort protocol packets received from the remote PortServer.
 *
 * NOTE(review): this view of the file is elided (line numbers jump);
 * comments describe only the visible statements.  On any protocol
 * violation the code sets `error` and (per the visible tail) drops the
 * node into NS_SEND_ERROR so dgrp_net_read() reports it.
 */
2248 struct ch_struct *ch;
2265 nd->nd_tx_time = jiffies;
2267 ID_TO_CHAR(nd->nd_ID, ID);
/* Decode in place from nd_iobuf; `remain` counts undecoded bytes. */
2269 b = buf = nd->nd_iobuf;
2270 remain = nd->nd_remain;
2273 * Loop to process Realport protocol packets.
2276 while (remain > 0) {
/* Low nibble of the first byte selects the port within the current
 * 16-port module (see nd_rx_module updates below). */
2278 int n1 = b[0] & 0x0f;
2281 port = (nd->nd_rx_module << 4) + n1;
2283 if (port >= nd->nd_chan_count) {
2284 error = "Improper Port Number";
2288 ch = nd->nd_chan + port;
2295 * Process by major packet type.
2301 * Process 1-byte header data packet.
2319 * Process 2-byte header data packet.
2333 * Process 3-byte header data packet.
2340 dlen = get_unaligned_be16(b + 1);
2346 * Common packet handling code.
2353 * Otherwise data should appear only when we are
2354 * in the CS_READY state.
2357 if (ch->ch_state < CS_READY) {
2358 error = "Data received before RWIN established";
2363 * Assure that the data received is within the
/* 16-bit window arithmetic: distance from rin to rwin, mod 64K. */
2367 n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
2370 error = "Receive data overrun";
2375 * If we received 3 or less characters,
2376 * assume it is a human typing, and set RTIME
2377 * to 10 milliseconds.
2379 * If we receive 10 or more characters,
2380 * assume its not a human typing, and set RTIME
2381 * to 100 milliseconds.
2384 if (ch->ch_edelay != DGRP_RTIME) {
2385 if (ch->ch_rtime != ch->ch_edelay) {
2386 ch->ch_rtime = ch->ch_edelay;
/* CH_PARAM asks the parameter-update path to push the new rtime. */
2387 ch->ch_flag |= CH_PARAM;
2389 } else if (dlen <= 3) {
2390 if (ch->ch_rtime != 10) {
2392 ch->ch_flag |= CH_PARAM;
2395 if (ch->ch_rtime != DGRP_RTIME) {
2396 ch->ch_rtime = DGRP_RTIME;
2397 ch->ch_flag |= CH_PARAM;
2402 * If a portion of the packet is outside the
2403 * buffer, shorten the effective length of the
2404 * data packet to be the amount of data received.
2408 dlen -= plen - remain;
2411 * Detect if receive flush is now complete.
2414 if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
2415 ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
2416 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2417 ch->ch_flag &= ~CH_RX_FLUSH;
2421 * If we are ready to receive, move the data into
2422 * the receive buffer.
2425 ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
/* Accept data only when the tty side is open, reading is enabled
 * (CF_CREAD), and no baud-0/flush condition is pending. */
2427 if (ch->ch_state == CS_READY &&
2428 (ch->ch_tun.un_open_count != 0) &&
2429 (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
2430 (ch->ch_cflag & CF_CREAD) != 0 &&
2431 (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
2432 (ch->ch_send & RR_RX_FLUSH) == 0) {
/* Circular ring copy: split the memcpy at the RBUF_MAX wrap point. */
2434 if (ch->ch_rin + dlen >= RBUF_MAX) {
2435 n = RBUF_MAX - ch->ch_rin;
2437 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
2444 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
2450 * If we are not in fastcook mode, or
2451 * if there is a fastcook thread
2452 * waiting for data, send the data to
2453 * the line discipline.
2456 if ((ch->ch_flag & CH_FAST_READ) == 0 ||
2457 ch->ch_inwait != 0) {
2462 * If there is a read thread waiting
2463 * in select, and we are in fastcook
2464 * mode, wake him up.
2467 if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
2468 (ch->ch_flag & CH_FAST_READ) != 0)
2469 wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
2472 * Wake any thread waiting in the
2476 if ((ch->ch_flag & CH_INPUT) != 0) {
2477 ch->ch_flag &= ~CH_INPUT;
2479 wake_up_interruptible(&ch->ch_flag_wait);
2484 * Fabricate and insert a data packet header to
2485 * preced the remaining data when it comes in.
2488 if (remain < plen) {
2489 dlen = plen - remain;
2493 put_unaligned_be16(dlen, b + 1);
2501 * Handle Window Sequence packets.
2512 ushort tpos = get_unaligned_be16(b + 1);
/* All three are mod-64K distances from ch_s_tpos. */
2514 ushort ack = (tpos - ch->ch_s_tpos) & 0xffff;
2515 ushort unack = (ch->ch_s_tin - ch->ch_s_tpos) & 0xffff;
2516 ushort notify = (ch->ch_s_treq - ch->ch_s_tpos) & 0xffff;
/* Server may never acknowledge more than we have outstanding. */
2518 if (ch->ch_state < CS_READY || ack > unack) {
2519 error = "Improper Window Sequence";
2523 ch->ch_s_tpos = tpos;
2526 ch->ch_s_treq = tpos;
2531 * Handle Command response packets.
2537 * RealPort engine fix - 03/11/2004
2539 * This check did not used to be here.
2541 * We were using b[1] without verifying that the data
2542 * is actually there and valid. On a split packet, it
2545 * NOTE: I have never actually seen the failure happen
2546 * under Linux, but since I have seen it occur
2547 * under both Solaris and HP-UX, the assumption
2548 * is that it *could* happen here as well...
2557 * Handle Open Response.
2570 port = get_unaligned_be16(b + 4);
2572 if (port >= nd->nd_chan_count) {
2573 error = "Open channel number out of range";
2577 ch = nd->nd_chan + port;
2580 * How we handle an open response depends primarily
2581 * on our current channel state.
2584 switch (ch->ch_state) {
2588 * Handle a delayed open.
2591 if (ch->ch_otype_waiting != 0 &&
2592 req == ch->ch_otype_waiting &&
2595 ch->ch_otype_waiting = 0;
2596 ch->ch_state = CS_SEND_QUERY;
2604 * Handle the open response.
2607 if (req == ch->ch_otype) {
2611 * On successful response, open the
2612 * port and proceed normally.
2616 ch->ch_state = CS_SEND_QUERY;
2620 * On a busy response to a persistent open,
2621 * remember that the open is pending.
2626 if (req != OTYPE_IMMEDIATE) {
2627 ch->ch_otype_waiting = req;
2628 ch->ch_state = CS_IDLE;
2633 * Otherwise the server open failed. If
2634 * the Unix port is open, hang it up.
2638 if (ch->ch_open_count != 0) {
2639 ch->ch_flag |= CH_HANGUP;
2641 ch->ch_state = CS_IDLE;
/* Record the failure code for the sleeping open() caller. */
2645 ch->ch_open_error = resp;
2646 ch->ch_state = CS_IDLE;
2648 wake_up_interruptible(&ch->ch_flag_wait);
2654 * Handle delayed response arrival preceding
2655 * the open response we are waiting for.
2658 if (ch->ch_otype_waiting != 0 &&
2659 req == ch->ch_otype_waiting &&
2661 ch->ch_otype = ch->ch_otype_waiting;
2662 ch->ch_otype_waiting = 0;
2663 ch->ch_state = CS_WAIT_FAIL;
2672 * Handle response to immediate open arriving
2673 * after a delayed open success.
2676 if (req == OTYPE_IMMEDIATE) {
2677 ch->ch_state = CS_SEND_QUERY;
2683 case CS_WAIT_CANCEL:
2685 * Handle delayed open response arriving before
2686 * the cancel response.
2689 if (req == ch->ch_otype_waiting &&
2691 ch->ch_otype_waiting = 0;
2696 * Handle cancel response.
/* req == 4 is the cancel request code; resp == 0 means success
 * (NOTE(review): inferred from context — confirm against protocol). */
2699 if (req == 4 && resp == 0) {
2700 ch->ch_otype_waiting = 0;
2701 ch->ch_state = CS_IDLE;
2709 * Handle a successful response to a port
2714 ch->ch_state = CS_IDLE;
2722 error = "Improper Open Response";
2730 * Handle Synchronize Response.
2742 * If channel was waiting for this sync response,
2743 * unset the flag, and wake up anyone waiting
2746 if (ch->ch_flag & CH_WAITING_SYNC) {
2747 ch->ch_flag &= ~(CH_WAITING_SYNC);
2748 wake_up_interruptible(&ch->ch_flag_wait);
/* Only acknowledge sequences that are actually outstanding
 * (distance check in SEQ_MASK arithmetic). */
2751 if (((seq - nd->nd_seq_out) & SEQ_MASK) >=
2752 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2756 for (s = nd->nd_seq_out;; s = (s + 1) & SEQ_MASK) {
2757 if (nd->nd_seq_wait[s] != 0) {
2758 nd->nd_seq_wait[s] = 0;
2760 wake_up_interruptible(&nd->nd_seq_wque[s]);
/* Reclaim the unacked-byte budget held by this sequence entry. */
2763 nd->nd_unack -= nd->nd_seq_size[s];
2769 nd->nd_seq_out = (seq + 1) & SEQ_MASK;
2774 * Handle Sequence Response.
2783 /* Record that we have received the Sequence
2784 * Response, but we aren't interested in the
2785 * sequence numbers. We were using RIN like it
2786 * was ROUT and that was causing problems,
2787 * fixed 7-13-2001 David Fries. See comment in
2788 * drp.h for ch_s_rin variable.
2789 int rin = get_unaligned_be16(b + 2);
2790 int tpos = get_unaligned_be16(b + 4);
2793 ch->ch_send &= ~RR_SEQUENCE;
2794 ch->ch_expect &= ~RR_SEQUENCE;
2799 * Handle Status Response.
2808 ch->ch_s_elast = get_unaligned_be16(b + 2);
2809 ch->ch_s_mlast = b[4];
2811 ch->ch_expect &= ~RR_STATUS;
2812 ch->ch_send &= ~RR_STATUS;
2815 * CH_PHYS_CD is cleared because something _could_ be
2816 * waiting for the initial sense of carrier... and if
2817 * carrier is high immediately, we want to be sure to
2818 * wake them as soon as possible.
2820 ch->ch_flag &= ~CH_PHYS_CD;
2827 * Handle Line Error Response.
2838 * Handle Buffer Response.
2847 ch->ch_s_rsize = get_unaligned_be16(b + 2);
2848 ch->ch_s_tsize = get_unaligned_be16(b + 4);
2850 ch->ch_send &= ~RR_BUFFER;
2851 ch->ch_expect &= ~RR_BUFFER;
2856 * Handle Port Capability Response.
2865 ch->ch_send &= ~RR_CAPABILITY;
2866 ch->ch_expect &= ~RR_CAPABILITY;
2870 * When all queries are complete, set those parameters
2871 * derived from the query results, then transition
2872 * to the READY state.
2876 if (ch->ch_state == CS_WAIT_QUERY &&
2877 (ch->ch_expect & (RR_SEQUENCE |
2880 RR_CAPABILITY)) == 0) {
/* Derive flow-control thresholds from the server buffer sizes. */
2881 ch->ch_tmax = ch->ch_s_tsize / 4;
2883 if (ch->ch_edelay == DGRP_TTIME)
2884 ch->ch_ttime = DGRP_TTIME;
2886 ch->ch_ttime = ch->ch_edelay;
2888 ch->ch_rmax = ch->ch_s_rsize / 4;
2890 if (ch->ch_edelay == DGRP_RTIME)
2891 ch->ch_rtime = DGRP_RTIME;
2893 ch->ch_rtime = ch->ch_edelay;
2895 ch->ch_rlow = 2 * ch->ch_s_rsize / 8;
2896 ch->ch_rhigh = 6 * ch->ch_s_rsize / 8;
2898 ch->ch_state = CS_READY;
2901 wake_up_interruptible(&ch->ch_flag_wait);
/* Modem/event status packet: compare new against last-seen values. */
2920 mlast = ch->ch_s_mlast;
2921 elast = ch->ch_s_elast;
2923 mstat = ch->ch_s_mlast = b[1];
2924 estat = ch->ch_s_elast = get_unaligned_be16(b + 2);
2927 * Handle modem changes.
2930 if (((mstat ^ mlast) & DM_CD) != 0)
2935 * Handle received break.
2938 if ((estat & ~elast & EV_RXB) != 0 &&
2939 (ch->ch_tun.un_open_count != 0) &&
2940 I_BRKINT(ch->ch_tun.un_tty) &&
2941 !(I_IGNBRK(ch->ch_tun.un_tty))) {
/* Push a TTY_BREAK marker through the flip buffer to the ldisc. */
2943 tty_buffer_request_room(&ch->port, 1);
2944 tty_insert_flip_char(&ch->port, 0, TTY_BREAK);
2945 tty_flip_buffer_push(&ch->port);
2950 * On transmit break complete, if more break traffic
2951 * is waiting then send it. Otherwise wake any threads
2952 * waiting for transmitter empty.
2955 if ((~estat & elast & EV_TXB) != 0 &&
2956 (ch->ch_expect & RR_TX_BREAK) != 0) {
2960 ch->ch_expect &= ~RR_TX_BREAK;
2962 if (ch->ch_break_time != 0) {
2963 ch->ch_send |= RR_TX_BREAK;
2965 ch->ch_send &= ~RR_TX_BREAK;
2966 ch->ch_flag &= ~CH_TX_BREAK;
2967 wake_up_interruptible(&ch->ch_flag_wait);
2974 error = "Unrecognized command";
2978 * Decode Special Codes.
2984 * One byte module select.
2996 nd->nd_rx_module = n1;
3000 * Two byte module select.
3008 nd->nd_rx_module = b[1];
3012 * ID Request packet.
3019 plen = get_unaligned_be16(b + 2);
/* Sanity-bound untrusted length field from the wire. */
3021 if (plen < 12 || plen > 1000) {
3022 error = "Response Packet length error";
3034 nd->nd_send |= NR_ECHO;
3038 * ID Response packet.
3042 nd->nd_send |= NR_IDENT;
3046 * ID Response packet.
3050 nd->nd_send |= NR_PASSWORD;
3057 * Various node-level response packets.
3064 plen = get_unaligned_be16(b + 2);
3066 if (plen < 4 || plen > 1000) {
3067 error = "Response Packet length error";
3079 nd->nd_expect &= ~NR_ECHO;
3083 * Product Response Packet.
3090 nd->nd_hw_ver = (b[8] << 8) | b[9];
3091 nd->nd_sw_ver = (b[10] << 8) | b[11];
3092 nd->nd_hw_id = b[6];
/* Clamp the wire-supplied description length before copying. */
3093 desclen = ((plen - 12) > MAX_DESC_LEN) ? MAX_DESC_LEN :
3097 error = "Response Packet desclen error";
3101 strncpy(nd->nd_ps_desc, b + 12, desclen);
/* Explicit NUL termination — strncpy alone does not guarantee it. */
3102 nd->nd_ps_desc[desclen] = 0;
3105 nd->nd_expect &= ~NR_IDENT;
3109 * Capability Response Packet.
3114 int nn = get_unaligned_be16(b + 4);
3119 dgrp_chan_count(nd, nn);
3122 nd->nd_expect &= ~NR_CAPABILITY;
3126 * VPD Response Packet.
3131 * NOTE: case 15 is here ONLY because the EtherLite
3132 * is broken, and sends a response to 24 back as 15.
3133 * To resolve this, the EtherLite firmware is now
3134 * fixed to send back 24 correctly, but, for backwards
3135 * compatibility, we now have reserved 15 for the
3136 * bad EtherLite response to 24 as well.
3144 * If the product doesn't support VPD,
3145 * it will send back a null IDRESP,
3146 * which is a length of 4 bytes.
3149 memcpy(nd->nd_vpd, b + 4, min(plen - 4, (long) VPDSIZE));
3150 nd->nd_vpd_len = min(plen - 4, (long) VPDSIZE);
3153 nd->nd_expect &= ~NR_VPD;
/* Once every expected node-level response has arrived, the link is up. */
3160 if (nd->nd_expect == 0 &&
3161 nd->nd_state == NS_WAIT_QUERY) {
3162 nd->nd_state = NS_READY;
3174 plen = get_unaligned_be16(b + 2) + 4;
3177 error = "Debug Packet too large";
3186 * Handle reset packet.
3205 error = "Client Reset Acknowledge";
3222 * When the buffer is exhausted, copy any data left at the
3223 * top of the buffer back down to the bottom for the next
3228 if (remain > 0 && b != buf)
3229 memcpy(buf, b, remain);
3231 nd->nd_remain = remain;
3235 * Handle a decode error.
3239 error = "Protocol decode error";
3242 * Handle a general protocol error.
3247 nd->nd_state = NS_SEND_ERROR;
3248 nd->nd_error = error;
3252 * dgrp_net_write() -- write data to the network device.
3254 * A zero byte write indicates that the connection to the RealPort
3255 * device has been broken.
3257 * A non-zero write indicates data from the RealPort device.
3259 static ssize_t dgrp_net_write(struct file *file, const char __user *buf,
3260 size_t count, loff_t *ppos)
/*
 * Accept RealPort protocol data from the network daemon.
 *
 * NOTE(review): this view of the file is elided; comments describe only
 * the visible statements.  A zero-byte write signals disconnect (per
 * the header comment above); otherwise the payload is appended to
 * nd_iobuf after any undecoded remainder and presumably handed to
 * dgrp_receive() — the call itself is not visible here, confirm.
 */
3262 struct nd_struct *nd;
3268 * Get the node pointer, and quit if it doesn't exist.
3270 nd = (struct nd_struct *)(file->private_data);
3275 * Grab the NET lock.
/* Serialize against dgrp_net_read() on the same node. */
3277 down(&nd->nd_net_semaphore);
3279 nd->nd_write_count++;
3282 * Handle disconnect.
3288 * Set the active port count to zero.
3290 dgrp_chan_count(nd, 0);
3295 * Loop to process entire receive packet.
/* Never accept more than the free space left in nd_iobuf. */
3299 n = UIO_MAX - nd->nd_remain;
/* Receive accounting includes the per-packet link header overhead. */
3304 nd->nd_rx_byte += n + nd->nd_link.lk_header_size;
3306 rtn = copy_from_user(nd->nd_iobuf + nd->nd_remain,
3307 (void __user *) buf + total, n);
/* Mirror inbound traffic to the monitor device, if open. */
3320 dgrp_monitor_data(nd, RPDUMP_SERVER,
3321 nd->nd_iobuf + nd->nd_remain, n);
3332 * Release the NET lock.
3334 up(&nd->nd_net_semaphore);
3342 * Determine whether a device is ready to be read or written to, and
3345 static unsigned int dgrp_net_select(struct file *file,
3346 struct poll_table_struct *table)
/*
 * poll()/select() handler for the net device.
 *
 * Readable only when dgrp_poll_handler() has marked nd_tx_ready (i.e.
 * there is protocol data for the daemon to fetch via dgrp_net_read());
 * always writable.  Waiters are parked on nd_tx_waitq.
 */
3348 unsigned int retval = 0;
3349 struct nd_struct *nd = file->private_data;
3351 poll_wait(file, &nd->nd_tx_waitq, table);
3353 if (nd->nd_tx_ready)
3354 retval |= POLLIN | POLLRDNORM; /* Conditionally readable */
3356 retval |= POLLOUT | POLLWRNORM; /* Always writeable */
3364 * Implement those functions which allow the network daemon to control
3365 * the network parameters in the driver. The ioctls include ones to
3366 * get and set the link speed parameters for the PortServer.
3368 static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
/*
 * ioctl handler: get/set the link-speed parameters (struct link_struct)
 * used by the transmit-credit scheduler in dgrp_poll_handler().
 *
 * NOTE(review): this view of the file is elided — the switch/case
 * dispatch on `cmd` and the store of `link` into nd->nd_link are not
 * visible; only the validation/clamping of the user-supplied values is.
 */
3371 struct nd_struct *nd;
3373 long size = _IOC_SIZE(cmd);
3374 struct link_struct link;
3376 nd = file->private_data;
/* Pre-validate the user pointer in the direction implied by the cmd. */
3378 if (_IOC_DIR(cmd) & _IOC_READ)
3379 rtn = access_ok(VERIFY_WRITE, (void __user *) arg, size);
3380 else if (_IOC_DIR(cmd) & _IOC_WRITE)
3381 rtn = access_ok(VERIFY_READ, (void __user *) arg, size);
3388 if (size != sizeof(struct link_struct))
3391 if (copy_from_user((void *)(&link), (void __user *) arg, size))
/* Clamp all user-supplied parameters into sane operating ranges:
 * rates in [2400, 10000000] bps, delays in [60, 10000] ms, header
 * size in [2, 128] bytes, with slow <= fast invariants enforced. */
3394 if (link.lk_fast_rate < 9600)
3395 link.lk_fast_rate = 9600;
3397 if (link.lk_slow_rate < 2400)
3398 link.lk_slow_rate = 2400;
3400 if (link.lk_fast_rate > 10000000)
3401 link.lk_fast_rate = 10000000;
3403 if (link.lk_slow_rate > link.lk_fast_rate)
3404 link.lk_slow_rate = link.lk_fast_rate;
3406 if (link.lk_fast_delay > 2000)
3407 link.lk_fast_delay = 2000;
3409 if (link.lk_slow_delay > 10000)
3410 link.lk_slow_delay = 10000;
3412 if (link.lk_fast_delay < 60)
3413 link.lk_fast_delay = 60;
3415 if (link.lk_slow_delay < link.lk_fast_delay)
3416 link.lk_slow_delay = link.lk_fast_delay;
3418 if (link.lk_header_size < 2)
3419 link.lk_header_size = 2;
3421 if (link.lk_header_size > 128)
3422 link.lk_header_size = 128;
/* Convert bits-per-second to bytes-per-poll-tick, and milliseconds
 * to poll ticks, so the poll handler can use them directly. */
3424 link.lk_fast_rate /= 8 * 1000 / dgrp_poll_tick;
3425 link.lk_slow_rate /= 8 * 1000 / dgrp_poll_tick;
3427 link.lk_fast_delay /= dgrp_poll_tick;
3428 link.lk_slow_delay /= dgrp_poll_tick;
3435 if (size != sizeof(struct link_struct))
/* Get-parameters path: return the node's current link settings. */
3438 if (copy_to_user((void __user *)arg, (void *)(&nd->nd_link),
3453 * dgrp_poll_handler() -- handler for poll timer
3455 * As each timer expires, it determines (a) whether the "transmit"
3456 * waiter needs to be woken up, and (b) whether the poller needs to
3459 void dgrp_poll_handler(unsigned long arg)
/*
 * Periodic timer callback: replenish each node's transmit credit
 * according to the measured link delay, and wake the daemon (via
 * nd_tx_waitq / nd_tx_ready) when there is work for dgrp_net_read().
 *
 * @arg: a struct dgrp_poll_data pointer cast to unsigned long
 *       (classic pre-timer_setup() style).
 *
 * NOTE(review): this view of the file is elided; comments describe only
 * the visible statements.
 */
3461 struct dgrp_poll_data *poll_data;
3462 struct nd_struct *nd;
3463 struct link_struct *lk;
3469 poll_data = (struct dgrp_poll_data *) arg;
/* poll_round cycles through [0, freq): a rotating remainder so the
 * integer statistic decay below distributes evenly across ticks. */
3470 freq = 1000 / poll_data->poll_tick;
3471 poll_data->poll_round += 17;
3473 if (poll_data->poll_round >= freq)
3474 poll_data->poll_round -= freq;
3477 * Loop to process all open nodes.
3479 * For each node, determine the rate at which it should
3480 * be transmitting data. Then if the node should wake up
3481 * and transmit data now, enable the net receive select
3482 * to get the transmit going.
3485 list_for_each_entry(nd, &nd_struct_list, list) {
3490 * Decrement statistics. These are only for use with
3491 * KME, so don't worry that the operations are done
3492 * unlocked, and so the results are occasionally wrong.
3495 nd->nd_read_count -= (nd->nd_read_count +
3496 poll_data->poll_round) / freq;
3497 nd->nd_write_count -= (nd->nd_write_count +
3498 poll_data->poll_round) / freq;
3499 nd->nd_send_count -= (nd->nd_send_count +
3500 poll_data->poll_round) / freq;
3501 nd->nd_tx_byte -= (nd->nd_tx_byte +
3502 poll_data->poll_round) / freq;
3503 nd->nd_rx_byte -= (nd->nd_rx_byte +
3504 poll_data->poll_round) / freq;
3507 * Wake the daemon to transmit data only when there is
3508 * enough byte credit to send data.
3510 * The results are approximate because the operations
3511 * are performed unlocked, and we are inspecting
3512 * data asynchronously updated elsewhere. The whole
3513 * thing is just approximation anyway, so that should
/* Fast path: link so fast that credit accounting is pointless —
 * pin rate/credit at their maxima. */
3517 if (lk->lk_slow_rate >= UIO_MAX) {
3520 nd->nd_rate = UIO_MAX;
3522 nd->nd_tx_deposit = nd->nd_tx_charge + 3 * UIO_MAX;
3523 nd->nd_tx_credit = 3 * UIO_MAX;
3534 long seq_in = nd->nd_seq_in;
3535 long seq_out = nd->nd_seq_out;
3538 * If there are no outstanding packets, run at the
3542 if (seq_in == seq_out) {
3544 rate = lk->lk_fast_rate;
3548 * Otherwise compute the transmit rate based on the
3549 * delay since the oldest packet.
3554 * The actual delay is computed as the
3555 * time since the oldest unacknowledged
3556 * packet was sent, minus the time it
3557 * took to send that packet to the server.
3560 delay = ((jiffies - nd->nd_seq_time[seq_out])
3561 - (nd->nd_seq_size[seq_out] /
3565 * If the delay is less than the "fast"
3566 * delay, transmit full speed. If greater
3567 * than the "slow" delay, transmit at the
3568 * "slow" speed. In between, interpolate
3569 * between the fast and slow speeds.
/* Piecewise-linear interpolation between fast and slow rates. */
3573 (delay <= lk->lk_fast_delay ?
3575 delay >= lk->lk_slow_delay ?
3578 (lk->lk_slow_delay - delay) *
3579 (lk->lk_fast_rate - lk->lk_slow_rate) /
3580 (lk->lk_slow_delay - lk->lk_fast_delay)
3585 nd->nd_delay = delay;
3589 * Increase the transmit credit by depositing the
3590 * current transmit rate.
3593 deposit = nd->nd_tx_deposit;
3594 charge = nd->nd_tx_charge;
3599 * If the available transmit credit becomes too large,
3600 * reduce the deposit to correct the value.
3602 * Too large is the max of:
3603 * 6 times the header size
3604 * 3 times the current transmit rate.
3607 size = 2 * nd->nd_link.lk_header_size;
3614 excess = deposit - charge - size;
3619 nd->nd_tx_deposit = deposit;
3620 nd->nd_tx_credit = deposit - charge;
3623 * Wake the transmit task only if the transmit credit
3624 * is at least 3 times the transmit header size.
3627 size = 3 * lk->lk_header_size;
3629 if (nd->nd_tx_credit < size)
3635 * Enable the READ select to wake the daemon if there
3636 * is useful work for the drp_read routine to perform.
/* Wake only when someone is polling AND there is pending tx work or
 * the idle-keepalive interval (IDLE_MAX) has elapsed. */
3639 if (waitqueue_active(&nd->nd_tx_waitq) &&
3640 (nd->nd_tx_work != 0 ||
3641 (ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX)) {
3642 nd->nd_tx_ready = 1;
3644 wake_up_interruptible(&nd->nd_tx_waitq);
3647 /* nd->nd_flag &= ~ND_SELECT; */
3653 * Schedule ourself back at the nominal wakeup interval.
/* node_active_count is protected by poll_lock; re-arm the timer only
 * while other nodes remain active. */
3655 spin_lock_irqsave(&poll_data->poll_lock, lock_flags);
3657 poll_data->node_active_count--;
3658 if (poll_data->node_active_count > 0) {
3659 poll_data->node_active_count++;
3660 poll_time = poll_data->timer.expires +
3661 poll_data->poll_tick * HZ / 1000;
3663 time = poll_time - jiffies;
/* If we have fallen badly behind, resynchronize to "now + one tick"
 * rather than firing a burst of catch-up expirations. */
3665 if (time >= 2 * poll_data->poll_tick)
3666 poll_time = jiffies + dgrp_poll_tick * HZ / 1000;
3668 poll_data->timer.expires = poll_time;
3669 add_timer(&poll_data->timer);
3672 spin_unlock_irqrestore(&poll_data->poll_lock, lock_flags);