2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
3 * Licensed under the GPL
6 #include <linux/slab.h>
8 #include <linux/tty_flip.h>
13 #ifdef CONFIG_NOCONFIG_CHAN
/*
 * Stub channel backend used when a channel type has been configured out of
 * this build.  Every operation just logs a KERN_ERR complaint; the tail of
 * each message and the return statements fall on lines elided from this
 * excerpt (NOTE(review): presumably returning NULL / -ENODEV-style errors —
 * confirm against the full file).
 */
14 static void *not_configged_init(char *str, int device,
15 const struct chan_opts *opts)
17 printk(KERN_ERR "Using a channel type which is configured out of "
22 static int not_configged_open(int input, int output, int primary, void *data,
25 printk(KERN_ERR "Using a channel type which is configured out of "
30 static void not_configged_close(int fd, void *data)
32 printk(KERN_ERR "Using a channel type which is configured out of "
36 static int not_configged_read(int fd, char *c_out, void *data)
38 printk(KERN_ERR "Using a channel type which is configured out of "
43 static int not_configged_write(int fd, const char *buf, int len, void *data)
45 printk(KERN_ERR "Using a channel type which is configured out of "
50 static int not_configged_console_write(int fd, const char *buf, int len)
52 printk(KERN_ERR "Using a channel type which is configured out of "
57 static int not_configged_window_size(int fd, void *data, unsigned short *rows,
60 printk(KERN_ERR "Using a channel type which is configured out of "
65 static void not_configged_free(void *data)
67 printk(KERN_ERR "Using a channel type which is configured out of "
/* Ops vector wiring all of the complaint stubs together. */
71 static const struct chan_ops not_configged_ops = {
72 .init = not_configged_init,
73 .open = not_configged_open,
74 .close = not_configged_close,
75 .read = not_configged_read,
76 .write = not_configged_write,
77 .console_write = not_configged_console_write,
78 .window_size = not_configged_window_size,
79 .free = not_configged_free,
82 #endif /* CONFIG_NOCONFIG_CHAN */
/*
 * Open a single channel: invoke the backend's ->open() (if one is provided)
 * to get an fd, then switch that fd to non-blocking with os_set_fd_block().
 * If setting non-blocking fails, the backend's ->close() is called to undo
 * the open.  NOTE(review): early-return/err paths are on lines elided here.
 */
84 static int open_one_chan(struct chan *chan)
91 if (chan->ops->open == NULL)
93 else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
94 chan->data, &chan->dev);
98 err = os_set_fd_block(fd, 0);
/* Roll back the open when the fd could not be made non-blocking. */
100 (*chan->ops->close)(fd, chan->data);
/*
 * Open every channel on the given list via open_one_chan().
 * NOTE(review): the error-accumulation/return logic sits on elided lines.
 */
110 static int open_chan(struct list_head *chans)
112 struct list_head *ele;
116 list_for_each(ele, chans) {
117 chan = list_entry(ele, struct chan, list);
118 ret = open_one_chan(chan);
/*
 * Register for window-size-change notifications on a primary channel whose
 * backend supports ->winch; no-op otherwise.
 */
125 void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
127 if (chan && chan->primary && chan->ops->winch)
128 register_winch(chan->fd, tty);
/*
 * Delayed-work callback: retry input delivery for the line by re-running
 * chan_interrupt() on its read IRQ, unless the line is throttled.
 * Scheduled from chan_interrupt() when the tty buffer had no room.
 */
131 static void line_timer_cb(struct work_struct *work)
133 struct line *line = container_of(work, struct line, task.work);
134 struct tty_struct *tty = tty_port_tty_get(&line->port);
136 if (!line->throttled)
137 chan_interrupt(line, tty, line->driver->read_irq);
/*
 * Bring a line's channels online: initialize the delayed work used to retry
 * input delivery, open each channel, and hook its fd into the line's IRQ
 * handling via line_setup_irq().  NOTE(review): error handling between the
 * open and the IRQ setup is on lines elided from this excerpt.
 */
141 int enable_chan(struct line *line)
143 struct list_head *ele;
147 INIT_DELAYED_WORK(&line->task, line_timer_cb);
149 list_for_each(ele, &line->chan_list) {
150 chan = list_entry(ele, struct chan, list);
151 err = open_one_chan(chan);
161 err = line_setup_irq(chan->fd, chan->input, chan->output, line,
176 /* Items are added in IRQ context, when free_irq can't be called, and
177 * removed in process context, when it can.
178 * This handles interrupt sources which disappear, and which need to
179 * be permanently disabled. This is discovered in IRQ context, but
180 * the freeing of the IRQ must be done later.
/* Lock and list backing the deferred-free_irq mechanism described above. */
182 static DEFINE_SPINLOCK(irqs_to_free_lock);
183 static LIST_HEAD(irqs_to_free);
/*
 * NOTE(review): the function header for this body is on lines elided from
 * this excerpt; presumably this is the process-context worker that drains
 * irqs_to_free — confirm against the full file.  It splices the pending
 * list out under the lock, then frees each queued channel's read/write
 * IRQs outside the lock.
 */
189 struct list_head *ele;
192 spin_lock_irqsave(&irqs_to_free_lock, flags);
/* Take the whole pending list atomically; leave irqs_to_free empty. */
193 list_splice_init(&irqs_to_free, &list);
194 spin_unlock_irqrestore(&irqs_to_free_lock, flags);
196 list_for_each(ele, &list) {
197 chan = list_entry(ele, struct chan, free_list);
199 if (chan->input && chan->enabled)
200 um_free_irq(chan->line->driver->read_irq, chan);
201 if (chan->output && chan->enabled)
202 um_free_irq(chan->line->driver->write_irq, chan);
/*
 * Close a single channel.  When delay_free_irq is set (caller is in IRQ
 * context where free_irq cannot be called) the channel is queued on
 * irqs_to_free for deferred IRQ freeing; otherwise its read/write IRQs are
 * freed immediately.  Finally the backend's ->close() is invoked, if any.
 */
207 static void close_one_chan(struct chan *chan, int delay_free_irq)
214 if (delay_free_irq) {
215 spin_lock_irqsave(&irqs_to_free_lock, flags);
216 list_add(&chan->free_list, &irqs_to_free);
217 spin_unlock_irqrestore(&irqs_to_free_lock, flags);
/* Process context: safe to free the IRQs right away. */
220 if (chan->input && chan->enabled)
221 um_free_irq(chan->line->driver->read_irq, chan);
222 if (chan->output && chan->enabled)
223 um_free_irq(chan->line->driver->write_irq, chan);
226 if (chan->ops->close != NULL)
227 (*chan->ops->close)(chan->fd, chan->data);
/* Close all of a line's channels (immediate IRQ freeing, process context). */
233 void close_chan(struct line *line)
237 /* Close in reverse order as open in case more than one of them
238 * refers to the same device and they save and restore that device's
239 * state. Then, the first one opened will have the original state,
240 * so it must be the last closed.
242 list_for_each_entry_reverse(chan, &line->chan_list, list) {
243 close_one_chan(chan, 0);
/* Stop IRQ delivery for an enabled channel's fd. */
247 void deactivate_chan(struct chan *chan, int irq)
249 if (chan && chan->enabled)
250 deactivate_fd(chan->fd, irq);
/* Re-enable IRQ delivery for an enabled channel's fd. */
253 void reactivate_chan(struct chan *chan, int irq)
255 if (chan && chan->enabled)
256 reactivate_fd(chan->fd, irq);
/*
 * Write len bytes through the channel's backend ->write().  On -EAGAIN or a
 * short write the write IRQ is re-armed so the remainder gets flushed when
 * the fd becomes writable again.
 */
259 int write_chan(struct chan *chan, const char *buf, int len,
264 if (len == 0 || !chan || !chan->ops->write)
267 n = chan->ops->write(chan->fd, buf, len, chan->data);
270 if ((ret == -EAGAIN) || ((ret >= 0) && (ret < len)))
271 reactivate_fd(chan->fd, write_irq);
/* Console-path write: delegate to the backend's ->console_write(), if any. */
276 int console_write_chan(struct chan *chan, const char *buf, int len)
280 if (!chan || !chan->ops->console_write)
283 n = chan->ops->console_write(chan->fd, buf, len);
/*
 * Open all channels of a console line and announce the console device.
 * NOTE(review): the error check between open_chan() and the printk is on
 * elided lines.
 */
289 int console_open_chan(struct line *line, struct console *co)
293 err = open_chan(&line->chan_list);
297 printk(KERN_INFO "Console initialized on /dev/%s%d\n", co->name,
/*
 * Report the terminal window size for a line: try the primary input channel
 * first, then fall back to the primary output channel.  A channel whose
 * backend lacks ->window_size is skipped (handling on elided lines).
 */
302 int chan_window_size(struct line *line, unsigned short *rows_out,
303 unsigned short *cols_out)
307 chan = line->chan_in;
308 if (chan && chan->primary) {
309 if (chan->ops->window_size == NULL)
311 return chan->ops->window_size(chan->fd, chan->data,
/* Input side unusable — try the output channel instead. */
314 chan = line->chan_out;
315 if (chan && chan->primary) {
316 if (chan->ops->window_size == NULL)
318 return chan->ops->window_size(chan->fd, chan->data,
/*
 * Tear down one channel: unlink it from its list, close it (immediate IRQ
 * freeing), release backend-private data via ->free(), and stop SIGIO
 * handling on a primary output fd.  NOTE(review): the final kfree(chan) is
 * presumably on an elided line — confirm against the full file.
 */
324 static void free_one_chan(struct chan *chan)
326 list_del(&chan->list);
328 close_one_chan(chan, 0);
330 if (chan->ops->free != NULL)
331 (*chan->ops->free)(chan->data);
333 if (chan->primary && chan->output)
334 ignore_sigio_fd(chan->fd);
/*
 * Free every channel on the list.  Uses the _safe iterator because
 * free_one_chan() deletes the current entry from the list.
 */
338 static void free_chan(struct list_head *chans)
340 struct list_head *ele, *next;
343 list_for_each_safe(ele, next, chans) {
344 chan = list_entry(ele, struct chan, list);
/*
 * Render one channel's configuration into str as "type" or "type:dev"
 * ("none" when there is no channel), using CONFIG_CHUNK to append pieces.
 * NOTE(review): the accumulated length n is returned on an elided line.
 */
349 static int one_chan_config_string(struct chan *chan, char *str, int size,
355 CONFIG_CHUNK(str, size, n, "none", 1);
359 CONFIG_CHUNK(str, size, n, chan->ops->type, 0);
361 if (chan->dev == NULL) {
362 CONFIG_CHUNK(str, size, n, "", 1);
366 CONFIG_CHUNK(str, size, n, ":", 0);
367 CONFIG_CHUNK(str, size, n, chan->dev, 0);
/*
 * Render an input/output channel pair as a config string: just the input's
 * string when both match, otherwise "in,out".  Builds on
 * one_chan_config_string(); the in==out comparison is on elided lines.
 */
372 static int chan_pair_config_string(struct chan *in, struct chan *out,
373 char *str, int size, char **error_out)
377 n = one_chan_config_string(in, str, size, error_out);
382 CONFIG_CHUNK(str, size, n, "", 1);
/* Distinct channels: append ",<out>" after the input's description. */
386 CONFIG_CHUNK(str, size, n, ",", 1);
387 n = one_chan_config_string(out, str, size, error_out);
390 CONFIG_CHUNK(str, size, n, "", 1);
/*
 * Public entry point: describe a line's channel configuration as a string.
 * Non-primary channels are masked out (to NULL, presumably — the
 * assignments are on elided lines) before delegating to
 * chan_pair_config_string().
 */
395 int chan_config_string(struct line *line, char *str, int size,
398 struct chan *in = line->chan_in, *out = line->chan_out;
400 if (in && !in->primary)
402 if (out && !out->primary)
405 return chan_pair_config_string(in, out, str, size, error_out);
410 const struct chan_ops *ops;
/*
 * Table mapping config-string keys ("null", "port", "pty", ...) to their
 * backend ops.  Backends compiled out of this build fall back to the
 * not_configged_ops complaint stubs.
 * Fix: the "&not_configged_ops" references below had been mojibake'd to
 * "¬_configged_ops" ("&not" rendered as the HTML entity ¬) — restored.
 */
413 static const struct chan_type chan_table[] = {
416 #ifdef CONFIG_NULL_CHAN
417 { "null", &null_ops },
419 { "null", &not_configged_ops },
422 #ifdef CONFIG_PORT_CHAN
423 { "port", &port_ops },
425 { "port", &not_configged_ops },
428 #ifdef CONFIG_PTY_CHAN
432 { "pty", &not_configged_ops },
433 { "pts", &not_configged_ops },
436 #ifdef CONFIG_TTY_CHAN
439 { "tty", &not_configged_ops },
442 #ifdef CONFIG_XTERM_CHAN
443 { "xterm", &xterm_ops },
445 { "xterm", &not_configged_ops },
/*
 * Parse one channel description: match the leading key against chan_table,
 * strip it from str, let the backend's ->init() build its private data,
 * then allocate and initialize a struct chan.  GFP_ATOMIC because this can
 * run in atomic context.  NOTE(review): the NULL-return error paths and the
 * rest of the designated initializer are on elided lines.
 */
449 static struct chan *parse_chan(struct line *line, char *str, int device,
450 const struct chan_opts *opts, char **error_out)
452 const struct chan_type *entry;
453 const struct chan_ops *ops;
460 for(i = 0; i < ARRAY_SIZE(chan_table); i++) {
461 entry = &chan_table[i];
462 if (!strncmp(str, entry->key, strlen(entry->key))) {
/* Key matched: advance past it so ->init() sees only the argument part. */
464 str += strlen(entry->key);
469 *error_out = "No match for configured backends";
473 data = (*ops->init)(str, device, opts);
475 *error_out = "Configuration failed";
479 chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
481 *error_out = "Memory allocation failed";
484 *chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list),
486 LIST_HEAD_INIT(chan->free_list),
/*
 * Parse a line's channel configuration.  "in,out" yields separate input and
 * output channels (split at the first ','); a single description is used
 * for both directions.  Any existing channels on the line are discarded
 * first (teardown on elided lines), then the new channel(s) are added to
 * line->chan_list and recorded as chan_in/chan_out.
 */
499 int parse_chan_pair(char *str, struct line *line, int device,
500 const struct chan_opts *opts, char **error_out)
502 struct list_head *chans = &line->chan_list;
506 if (!list_empty(chans)) {
507 line->chan_in = line->chan_out = NULL;
509 INIT_LIST_HEAD(chans);
/* Comma present => independent input and output descriptions. */
515 out = strchr(str, ',');
520 new = parse_chan(line, in, device, opts, error_out);
525 list_add(&new->list, chans);
528 new = parse_chan(line, out, device, opts, error_out);
532 list_add(&new->list, chans);
534 line->chan_out = new;
/* No comma: one channel serves as both input and output. */
537 new = parse_chan(line, str, device, opts, error_out);
541 list_add(&new->list, chans);
544 line->chan_in = line->chan_out = new;
/*
 * Read-IRQ handler for a line: pull characters one at a time from the input
 * channel's backend ->read() into the tty flip buffer.  If the flip buffer
 * has no room, delivery is retried later via the line's delayed work
 * (line_timer_cb).  After draining, the fd's IRQ is re-armed; on EOF/error
 * (branch structure partly elided) both channels are closed with deferred
 * IRQ freeing, since this runs in IRQ context.  Ends by pushing the flip
 * buffer to the tty layer.
 */
549 void chan_interrupt(struct line *line, struct tty_struct *tty, int irq)
551 struct tty_port *port = &line->port;
552 struct chan *chan = line->chan_in;
556 if (!chan || !chan->ops->read)
560 if (!tty_buffer_request_room(port, 1)) {
/* Buffer full: back off and retry on the next tick. */
561 schedule_delayed_work(&line->task, 1);
564 err = chan->ops->read(chan->fd, &c, chan->data);
566 tty_insert_flip_char(port, c, TTY_NORMAL);
570 reactivate_fd(chan->fd, irq);
/* Channel went away: close output too if it is a distinct channel. */
575 if (line->chan_out != chan)
576 close_one_chan(line->chan_out, 1);
578 close_one_chan(chan, 1);
584 tty_flip_buffer_push(tty);