2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
3 * Licensed under the GPL
6 #include <linux/slab.h>
8 #include <linux/tty_flip.h>
#ifdef CONFIG_NOCONFIG_CHAN
/*
 * Placeholder channel operations installed for channel types that were
 * configured out of the build.  Every entry point logs the same error so
 * that naming a configured-out channel type on the command line fails
 * loudly instead of silently; the error returns make the open/read/write
 * paths bail out.
 */
static void complain_configged_out(void)
{
	printk(KERN_ERR "Using a channel type which is configured out of "
	       "UML\n");
}

static void *not_configged_init(char *str, int device,
				const struct chan_opts *opts)
{
	complain_configged_out();
	return NULL;
}

static int not_configged_open(int input, int output, int primary, void *data,
			      char **dev_out)
{
	complain_configged_out();
	return -ENODEV;
}

static void not_configged_close(int fd, void *data)
{
	complain_configged_out();
}

static int not_configged_read(int fd, char *c_out, void *data)
{
	complain_configged_out();
	return -EIO;
}

static int not_configged_write(int fd, const char *buf, int len, void *data)
{
	complain_configged_out();
	return -EIO;
}

static int not_configged_console_write(int fd, const char *buf, int len)
{
	complain_configged_out();
	return -EIO;
}

static int not_configged_window_size(int fd, void *data, unsigned short *rows,
				     unsigned short *cols)
{
	complain_configged_out();
	return -ENODEV;
}

static void not_configged_free(void *data)
{
	complain_configged_out();
}

static const struct chan_ops not_configged_ops = {
	.init		= not_configged_init,
	.open		= not_configged_open,
	.close		= not_configged_close,
	.read		= not_configged_read,
	.write		= not_configged_write,
	.console_write	= not_configged_console_write,
	.window_size	= not_configged_window_size,
	.free		= not_configged_free,
};
#endif /* CONFIG_NOCONFIG_CHAN */
84 static void tty_receive_char(struct tty_struct *tty, char ch)
87 tty_insert_flip_char(tty, ch, TTY_NORMAL);
/*
 * Open a single channel: call the backend's ->open() hook (when present) to
 * obtain a host fd, then switch that fd to non-blocking mode; if that fails,
 * the fd is handed back to the backend via ->close().
 * NOTE(review): several lines of this function are missing from this view
 * (the no-->open fallback value, error checks, and the bookkeeping that
 * stores the fd in the chan) — comments describe only what is visible.
 */
static int open_one_chan(struct chan *chan)
/* Backends without an ->open hook take the other branch (not visible). */
if (chan->ops->open == NULL)
else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
			     chan->data, &chan->dev);
/* Channel fds are driven by SIGIO/irqs, so they must not block. */
err = os_set_fd_block(fd, 0);
/* Making the fd non-blocking failed: undo the open. */
(*chan->ops->close)(fd, chan->data);
/*
 * Open every channel on the given list by delegating to open_one_chan().
 * NOTE(review): the declarations of 'chan' and 'ret', the handling of the
 * per-channel result, and the final return are missing from this view.
 */
static int open_chan(struct list_head *chans)
struct list_head *ele;
/* Walk the line's channel list and open each entry in turn. */
list_for_each(ele, chans) {
	chan = list_entry(ele, struct chan, list);
	ret = open_one_chan(chan);
131 void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
133 if (chan && chan->primary && chan->ops->winch)
134 register_winch(chan->fd, tty);
/*
 * Delayed-work callback: retry reading from the line's input channel once
 * the tty layer may have room again (scheduled from chan_interrupt() when
 * the flip buffer was full).
 * NOTE(review): the tail of this function is missing from this view; the
 * tty reference taken by tty_port_tty_get() is presumably dropped there
 * with tty_kref_put() — confirm against the full source.
 */
static void line_timer_cb(struct work_struct *work)
struct line *line = container_of(work, struct line, task.work);
/* May be NULL when no tty is currently attached to the port. */
struct tty_struct *tty = tty_port_tty_get(&line->port);
/* Don't feed more input while the tty has throttled us. */
if (!line->throttled)
	chan_interrupt(line, tty, line->driver->read_irq);
/*
 * Open and wire up every channel belonging to a line: each channel is
 * opened and its fd is hooked into the line's read/write irq handling.
 * NOTE(review): the error handling, loop tail, and return are missing from
 * this view — comments describe only the visible steps.
 */
int enable_chan(struct line *line)
struct list_head *ele;
/* (Re)arm the deferred read-retry used when the tty buffer is full. */
INIT_DELAYED_WORK(&line->task, line_timer_cb);

list_for_each(ele, &line->chan_list) {
	chan = list_entry(ele, struct chan, list);
	err = open_one_chan(chan);
/* Attach the opened fd to the line's irqs. */
err = line_setup_irq(chan->fd, chan->input, chan->output, line,
/*
 * Items are added in IRQ context, when free_irq can't be called, and
 * removed in process context, when it can.
 * This handles interrupt sources which disappear, and which need to
 * be permanently disabled.  This is discovered in IRQ context, but
 * the freeing of the IRQ must be done later.
 */
/* Protects irqs_to_free; taken irqsave because producers run in IRQ context. */
static DEFINE_SPINLOCK(irqs_to_free_lock);
static LIST_HEAD(irqs_to_free);
/*
 * Drain the deferred-free list (see the comment above irqs_to_free): splice
 * the pending channels out under the lock, then release their read/write
 * irqs in process context, where free_irq is legal.
 * NOTE(review): the enclosing function header and local declarations are
 * not visible in this view.
 */
struct list_head *ele;

/* Steal the whole pending list atomically so the irqs can be freed unlocked. */
spin_lock_irqsave(&irqs_to_free_lock, flags);
list_splice_init(&irqs_to_free, &list);
spin_unlock_irqrestore(&irqs_to_free_lock, flags);

list_for_each(ele, &list) {
	chan = list_entry(ele, struct chan, free_list);

	/* Free the read irq for input channels, write irq for output ones. */
	if (chan->input && chan->enabled)
		um_free_irq(chan->line->driver->read_irq, chan);
	if (chan->output && chan->enabled)
		um_free_irq(chan->line->driver->write_irq, chan);
/*
 * Close one channel: release (or defer releasing) its irqs, then let the
 * backend close the host fd.
 * NOTE(review): parts of this function (an early-out for channels never
 * opened, the branch structure around delay_free_irq, and the tail that
 * clears the chan's state) are missing from this view.
 */
static void close_one_chan(struct chan *chan, int delay_free_irq)
if (delay_free_irq) {
	/* Caller is in IRQ context, where free_irq() is illegal — queue the
	 * channel on irqs_to_free so free_irqs() can do it later. */
	spin_lock_irqsave(&irqs_to_free_lock, flags);
	list_add(&chan->free_list, &irqs_to_free);
	spin_unlock_irqrestore(&irqs_to_free_lock, flags);
/* Process context: the irqs can be freed right away. */
if (chan->input && chan->enabled)
	um_free_irq(chan->line->driver->read_irq, chan);
if (chan->output && chan->enabled)
	um_free_irq(chan->line->driver->write_irq, chan);
/* Finally hand the fd back to the backend, if it has a close hook. */
if (chan->ops->close != NULL)
	(*chan->ops->close)(chan->fd, chan->data);
239 void close_chan(struct line *line)
243 /* Close in reverse order as open in case more than one of them
244 * refers to the same device and they save and restore that device's
245 * state. Then, the first one opened will have the original state,
246 * so it must be the last closed.
248 list_for_each_entry_reverse(chan, &line->chan_list, list) {
249 close_one_chan(chan, 0);
253 void deactivate_chan(struct chan *chan, int irq)
255 if (chan && chan->enabled)
256 deactivate_fd(chan->fd, irq);
259 void reactivate_chan(struct chan *chan, int irq)
261 if (chan && chan->enabled)
262 reactivate_fd(chan->fd, irq);
265 int write_chan(struct chan *chan, const char *buf, int len,
270 if (len == 0 || !chan || !chan->ops->write)
273 n = chan->ops->write(chan->fd, buf, len, chan->data);
276 if ((ret == -EAGAIN) || ((ret >= 0) && (ret < len)))
277 reactivate_fd(chan->fd, write_irq);
282 int console_write_chan(struct chan *chan, const char *buf, int len)
286 if (!chan || !chan->ops->console_write)
289 n = chan->ops->console_write(chan->fd, buf, len);
295 int console_open_chan(struct line *line, struct console *co)
299 err = open_chan(&line->chan_list);
303 printk(KERN_INFO "Console initialized on /dev/%s%d\n", co->name,
308 int chan_window_size(struct line *line, unsigned short *rows_out,
309 unsigned short *cols_out)
313 chan = line->chan_in;
314 if (chan && chan->primary) {
315 if (chan->ops->window_size == NULL)
317 return chan->ops->window_size(chan->fd, chan->data,
320 chan = line->chan_out;
321 if (chan && chan->primary) {
322 if (chan->ops->window_size == NULL)
324 return chan->ops->window_size(chan->fd, chan->data,
330 static void free_one_chan(struct chan *chan)
332 list_del(&chan->list);
334 close_one_chan(chan, 0);
336 if (chan->ops->free != NULL)
337 (*chan->ops->free)(chan->data);
339 if (chan->primary && chan->output)
340 ignore_sigio_fd(chan->fd);
344 static void free_chan(struct list_head *chans)
346 struct list_head *ele, *next;
349 list_for_each_safe(ele, next, chans) {
350 chan = list_entry(ele, struct chan, list);
355 static int one_chan_config_string(struct chan *chan, char *str, int size,
361 CONFIG_CHUNK(str, size, n, "none", 1);
365 CONFIG_CHUNK(str, size, n, chan->ops->type, 0);
367 if (chan->dev == NULL) {
368 CONFIG_CHUNK(str, size, n, "", 1);
372 CONFIG_CHUNK(str, size, n, ":", 0);
373 CONFIG_CHUNK(str, size, n, chan->dev, 0);
/*
 * Render an input/output channel pair.  A pair sharing one channel prints
 * just that channel; otherwise the two are printed comma-separated.
 * Returns the number of bytes emitted.
 */
static int chan_pair_config_string(struct chan *in, struct chan *out,
				   char *str, int size, char **error_out)
{
	int n;

	n = one_chan_config_string(in, str, size, error_out);
	str += n;
	size -= n;

	/* Same channel serves both directions: one description is enough. */
	if (in == out) {
		CONFIG_CHUNK(str, size, n, "", 1);
		return n;
	}

	CONFIG_CHUNK(str, size, n, ",", 1);
	n = one_chan_config_string(out, str, size, error_out);
	str += n;
	size -= n;
	CONFIG_CHUNK(str, size, n, "", 1);

	return n;
}
401 int chan_config_string(struct line *line, char *str, int size,
404 struct chan *in = line->chan_in, *out = line->chan_out;
406 if (in && !in->primary)
408 if (out && !out->primary)
411 return chan_pair_config_string(in, out, str, size, error_out);
/* Member of struct chan_type (the struct's opening lines are missing from
 * this view): the backend operations mapped to the config keyword. */
const struct chan_ops *ops;

/*
 * Table mapping channel-type keywords from the command line to their
 * backends.  Types compiled out fall back to not_configged_ops, whose hooks
 * only log an error.
 * NOTE(review): the #else/#endif lines and several configured-in entries
 * (e.g. the pty/pts/tty real backends) are missing from this view.
 */
static const struct chan_type chan_table[] = {
#ifdef CONFIG_NULL_CHAN
	{ "null", &null_ops },
	{ "null", &not_configged_ops },

#ifdef CONFIG_PORT_CHAN
	{ "port", &port_ops },
	{ "port", &not_configged_ops },

#ifdef CONFIG_PTY_CHAN
	{ "pty", &not_configged_ops },
	{ "pts", &not_configged_ops },

#ifdef CONFIG_TTY_CHAN
	{ "tty", &not_configged_ops },

#ifdef CONFIG_XTERM_CHAN
	{ "xterm", &xterm_ops },
	{ "xterm", &not_configged_ops },
/*
 * Translate one channel description string into a newly allocated struct
 * chan.  The leading keyword selects the backend from chan_table; the rest
 * of the string is handed to the backend's ->init() hook.
 * NOTE(review): several lines (the data/chan declarations, the error-path
 * returns, and most of the struct chan initializer) are missing from this
 * view.
 */
static struct chan *parse_chan(struct line *line, char *str, int device,
			       const struct chan_opts *opts, char **error_out)
const struct chan_type *entry;
const struct chan_ops *ops;
/* First table entry whose key is a prefix of str wins. */
for(i = 0; i < ARRAY_SIZE(chan_table); i++) {
	entry = &chan_table[i];
	if (!strncmp(str, entry->key, strlen(entry->key))) {
		/* Strip the keyword; the remainder is backend-specific. */
		str += strlen(entry->key);
*error_out = "No match for configured backends";
/* Backend parses the remainder of the config string. */
data = (*ops->init)(str, device, opts);
*error_out = "Configuration failed";
/* GFP_ATOMIC: presumably reachable from non-sleeping context — confirm. */
chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
*error_out = "Memory allocation failed";
*chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list),
	 LIST_HEAD_INIT(chan->free_list),
/*
 * Parse a line's channel configuration.  "in,out" produces two channels
 * (input and output respectively); a single description is used for both
 * directions.  Any previously configured channels are dropped first.
 * NOTE(review): the error handling, the splitting of str at the comma, and
 * the assignment of line->chan_in in the pair case are missing from this
 * view.
 */
int parse_chan_pair(char *str, struct line *line, int device,
		    const struct chan_opts *opts, char **error_out)
struct list_head *chans = &line->chan_list;
/* Reconfiguration: forget the old channel pointers before rebuilding. */
if (!list_empty(chans)) {
	line->chan_in = line->chan_out = NULL;
INIT_LIST_HEAD(chans);
/* A comma separates distinct input and output descriptions. */
out = strchr(str, ',');
new = parse_chan(line, in, device, opts, error_out);
list_add(&new->list, chans);
new = parse_chan(line, out, device, opts, error_out);
list_add(&new->list, chans);
line->chan_out = new;
/* Single description: one channel serves both directions. */
new = parse_chan(line, str, device, opts, error_out);
list_add(&new->list, chans);
line->chan_in = line->chan_out = new;
/*
 * Read-irq handler for a line: drain the input channel a byte at a time
 * into the tty, deferring via delayed work when the tty flip buffer is
 * full, and tearing both channels down (with deferred irq freeing) when
 * the host side goes away.
 * NOTE(review): the read-loop structure, error checks, and labels are
 * partially missing from this view.
 */
void chan_interrupt(struct line *line, struct tty_struct *tty, int irq)
struct chan *chan = line->chan_in;
/* Nothing to do without a readable input channel. */
if (!chan || !chan->ops->read)
/* tty buffer full: retry later from line_timer_cb instead of dropping. */
if (tty && !tty_buffer_request_room(tty, 1)) {
	schedule_delayed_work(&line->task, 1);
err = chan->ops->read(chan->fd, &c, chan->data);
tty_receive_char(tty, c);
/* Re-arm the fd so further input raises this irq again. */
reactivate_fd(chan->fd, irq);
/* Input side died: close both channels; irq freeing must be deferred
 * because we are in IRQ context (see irqs_to_free). */
if (line->chan_out != chan)
	close_one_chan(line->chan_out, 1);
close_one_chan(chan, 1);
/* Hand whatever was accumulated to the tty layer. */
tty_flip_buffer_push(tty);