/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */
6 #include "linux/completion.h"
7 #include "linux/interrupt.h"
8 #include "linux/list.h"
9 #include "linux/mutex.h"
10 #include "linux/slab.h"
11 #include "linux/workqueue.h"
12 #include "asm/atomic.h"
19 struct list_head list;
22 struct completion done;
26 struct list_head pending;
27 struct list_head connections;
31 struct port_list *port;
37 struct list_head list;
42 struct port_list *port;
45 static irqreturn_t pipe_interrupt(int irq, void *data)
47 struct connection *conn = data;
50 fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
55 printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
57 os_close_file(conn->fd);
60 list_del(&conn->list);
63 list_add(&conn->list, &conn->port->connections);
65 complete(&conn->port->done);
/* Message written to a new connection when no console is waiting on the
 * port; the surrounding "****" lines make it stand out in the telnet
 * session. */
#define NO_WAITER_MSG \
    "****\n" \
    "There are currently no UML consoles waiting for port connections.\n" \
    "Either disconnect from one to make it available or activate some more\n" \
    "by enabling more consoles in the UML /etc/inittab.\n" \
    "****\n"
76 static int port_accept(struct port_list *port)
78 struct connection *conn;
79 int fd, socket[2], pid;
81 fd = port_connection(port->fd, socket, &pid);
84 printk(KERN_ERR "port_accept : port_connection "
85 "returned %d\n", -fd);
89 conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
91 printk(KERN_ERR "port_accept : failed to allocate "
95 *conn = ((struct connection)
96 { .list = LIST_HEAD_INIT(conn->list),
98 .socket = { socket[0], socket[1] },
102 if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
103 IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
105 printk(KERN_ERR "port_accept : failed to get IRQ for "
110 if (atomic_read(&port->wait_count) == 0) {
111 os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
112 printk(KERN_ERR "No one waiting for port\n");
114 list_add(&conn->list, &port->pending);
121 os_kill_process(pid, 1);
/* Serializes port_data()'s lookup/creation of entries on "ports". */
static DEFINE_MUTEX(ports_mutex);
/* Global list of all port_list structures, one per listening port. */
static LIST_HEAD(ports);
129 static void port_work_proc(struct work_struct *unused)
131 struct port_list *port;
132 struct list_head *ele;
135 local_irq_save(flags);
136 list_for_each(ele, &ports) {
137 port = list_entry(ele, struct port_list, list);
138 if (!port->has_connection)
141 reactivate_fd(port->fd, ACCEPT_IRQ);
142 while (port_accept(port))
144 port->has_connection = 0;
146 local_irq_restore(flags);
149 DECLARE_WORK(port_work, port_work_proc);
151 static irqreturn_t port_interrupt(int irq, void *data)
153 struct port_list *port = data;
155 port->has_connection = 1;
156 schedule_work(&port_work);
160 void *port_data(int port_num)
162 struct list_head *ele;
163 struct port_list *port;
164 struct port_dev *dev = NULL;
167 mutex_lock(&ports_mutex);
168 list_for_each(ele, &ports) {
169 port = list_entry(ele, struct port_list, list);
170 if (port->port == port_num)
173 port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
175 printk(KERN_ERR "Allocation of port list failed\n");
179 fd = port_listen_fd(port_num);
181 printk(KERN_ERR "binding to port %d failed, errno = %d\n",
186 if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
187 IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
189 printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
193 *port = ((struct port_list)
194 { .list = LIST_HEAD_INIT(port->list),
195 .wait_count = ATOMIC_INIT(0),
199 .pending = LIST_HEAD_INIT(port->pending),
200 .connections = LIST_HEAD_INIT(port->connections) });
201 spin_lock_init(&port->lock);
202 init_completion(&port->done);
203 list_add(&port->list, &ports);
206 dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
208 printk(KERN_ERR "Allocation of port device entry failed\n");
212 *dev = ((struct port_dev) { .port = port,
214 .telnetd_pid = -1 });
222 mutex_unlock(&ports_mutex);
226 int port_wait(void *data)
228 struct port_dev *dev = data;
229 struct connection *conn;
230 struct port_list *port = dev->port;
233 atomic_inc(&port->wait_count);
236 if (wait_for_completion_interruptible(&port->done))
239 spin_lock(&port->lock);
241 conn = list_entry(port->connections.next, struct connection,
243 list_del(&conn->list);
244 spin_unlock(&port->lock);
246 os_shutdown_socket(conn->socket[0], 1, 1);
247 os_close_file(conn->socket[0]);
248 os_shutdown_socket(conn->socket[1], 1, 1);
249 os_close_file(conn->socket[1]);
251 /* This is done here because freeing an IRQ can't be done
252 * within the IRQ handler. So, pipe_interrupt always ups
253 * the semaphore regardless of whether it got a successful
254 * connection. Then we loop here throwing out failed
255 * connections until a good one is found.
257 free_irq(TELNETD_IRQ, conn);
261 os_close_file(conn->fd);
266 dev->helper_pid = conn->helper_pid;
267 dev->telnetd_pid = conn->telnetd_pid;
270 atomic_dec(&port->wait_count);
274 void port_remove_dev(void *d)
276 struct port_dev *dev = d;
278 if (dev->helper_pid != -1)
279 os_kill_process(dev->helper_pid, 0);
280 if (dev->telnetd_pid != -1)
281 os_kill_process(dev->telnetd_pid, 1);
282 dev->helper_pid = -1;
283 dev->telnetd_pid = -1;
/*
 * Release a port device allocated by port_data(): kill its helpers and
 * free the structure itself.
 */
void port_kern_free(void *d)
{
	struct port_dev *dev = d;

	port_remove_dev(dev);
	kfree(dev);
}
294 static void free_port(void)
296 struct list_head *ele;
297 struct port_list *port;
299 list_for_each(ele, &ports) {
300 port = list_entry(ele, struct port_list, list);
301 free_irq_by_fd(port->fd);
302 os_close_file(port->fd);
306 __uml_exitcall(free_port);