/*
 * SCLP VT220 terminal driver.
 *
 * Copyright IBM Corp. 2003, 2009
 *
 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "sclp.h"
#define SCLP_VT220_MAJOR		TTY_MAJOR
#define SCLP_VT220_MINOR		65
#define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
#define SCLP_VT220_DEVICE_NAME		"ttysclp"
#define SCLP_VT220_CONSOLE_NAME		"ttyS"
#define SCLP_VT220_CONSOLE_INDEX	1	/* console=ttyS1 */
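/* With a single tty device, the node shows up as /dev/ttysclp0 (major
 * TTY_MAJOR, minor 65), while the console part below registers as ttyS1. */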
/* Representation of a single write request */
struct sclp_vt220_request {
	struct list_head list;
	struct sclp_req sclp_req;
	int retry_count;
};

/* VT220 SCCB */
struct sclp_vt220_sccb {
	struct sccb_header header;
	struct evbuf_header evbuf;
};
#define SCLP_VT220_MAX_CHARS_PER_BUFFER	(PAGE_SIZE - \
					 sizeof(struct sclp_vt220_request) - \
					 sizeof(struct sclp_vt220_sccb))
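/* Each write buffer is a single page: the SCCB (header plus event buffer
 * header) sits at the start of the page and grows as message text is
 * appended, while the struct sclp_vt220_request bookkeeping data is placed
 * at the very end of the page (see sclp_vt220_initialize_page()). The space
 * in between is what SCLP_VT220_MAX_CHARS_PER_BUFFER accounts for. */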
/* Structures and data needed to register tty driver */
static struct tty_driver *sclp_vt220_driver;

static struct tty_port sclp_vt220_port;

/* Lock to protect internal data from concurrent access */
static spinlock_t sclp_vt220_lock;

/* List of empty pages to be used as write request buffers */
static struct list_head sclp_vt220_empty;

/* List of pending requests */
static struct list_head sclp_vt220_outqueue;

/* Suspend mode flag */
static int sclp_vt220_suspended;

/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;

/* Timer used for delaying write requests to merge subsequent messages into
 * a single buffer */
static struct timer_list sclp_vt220_timer;

/* Pointer to current request buffer which has been partially filled but not
 * yet sent */
static struct sclp_vt220_request *sclp_vt220_current_request;

/* Number of characters in current request buffer */
static int sclp_vt220_buffered_chars;

/* Counter controlling core driver initialization. */
static int __initdata sclp_vt220_init_count;

/* Flag indicating that sclp_vt220_current_request should really
 * have been already queued but wasn't because the SCLP was processing
 * another buffer */
static int sclp_vt220_flush_later;
static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
				   enum sclp_pm_event sclp_pm_event);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);
/* Registration structure for our interest in SCLP event buffers */
static struct sclp_register sclp_vt220_register = {
	.send_mask = EVTYP_VT220MSG_MASK,
	.receive_mask = EVTYP_VT220MSG_MASK,
	.state_change_fn = NULL,
	.receiver_fn = sclp_vt220_receiver_fn,
	.pm_event_fn = sclp_vt220_pm_event_fn,
};
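/* The driver both sends and receives VT220 message event buffers and is
 * notified about power management events for suspend/resume handling. */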
/*
 * Put provided request buffer back into queue and check emit pending
 * buffers if necessary.
 */
static void
sclp_vt220_process_queue(struct sclp_vt220_request *request)
{
	struct tty_struct *tty;
	unsigned long flags;
	void *page;

	do {
		/* Put buffer back to list of empty buffers */
		page = request->sclp_req.sccb;
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		/* Move request from outqueue to empty queue */
		list_del(&request->list);
		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
		/* Check if there is a pending buffer on the out queue. */
		request = NULL;
		if (!list_empty(&sclp_vt220_outqueue))
			request = list_entry(sclp_vt220_outqueue.next,
					     struct sclp_vt220_request, list);
		if (!request || sclp_vt220_suspended) {
			sclp_vt220_queue_running = 0;
			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	} while (__sclp_vt220_emit(request));
	if (request == NULL && sclp_vt220_flush_later)
		sclp_vt220_emit_current();
	/* Check if the tty needs a wake up call */
	tty = tty_port_tty_get(&sclp_vt220_port);
	if (tty != NULL) {
		tty_wakeup(tty);
		tty_kref_put(tty);
	}
}
#define SCLP_BUFFER_MAX_RETRY		1
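/* A buffer whose write request fails with a recoverable response code is
 * re-submitted at most SCLP_BUFFER_MAX_RETRY times before it is dropped and
 * its page is recycled. */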
/*
 * Callback through which the result of a write request is reported by the
 * SCLP.
 */
static void
sclp_vt220_callback(struct sclp_req *request, void *data)
{
	struct sclp_vt220_request *vt220_request;
	struct sclp_vt220_sccb *sccb;

	vt220_request = (struct sclp_vt220_request *) data;
	if (request->status == SCLP_REQ_FAILED) {
		sclp_vt220_process_queue(vt220_request);
		return;
	}
	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;

	/* Check SCLP response code and choose suitable action */
	switch (sccb->header.response_code) {
	case 0x0020: /* Normal completion */
		break;

	case 0x05f0: /* Target resource in improper state */
		break;

	case 0x0340: /* Contained SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		/* Remove processed buffers and requeue rest */
		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
			/* Not all buffers were processed */
			sccb->header.response_code = 0x0000;
			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
			if (sclp_add_request(request) == 0)
				return;
		}
		break;

	case 0x0040: /* SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		sccb->header.response_code = 0x0000;
		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
		if (sclp_add_request(request) == 0)
			return;
		break;

	default:
		break;
	}
	sclp_vt220_process_queue(vt220_request);
}
/*
 * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
 * otherwise.
 */
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
	if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
		request->sclp_req.status = SCLP_REQ_FAILED;
		return -EIO;
	}
	request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
	request->sclp_req.status = SCLP_REQ_FILLED;
	request->sclp_req.callback = sclp_vt220_callback;
	request->sclp_req.callback_data = (void *) request;
	return sclp_add_request(&request->sclp_req);
}
/*
 * Queue and emit current request.
 */
static void
sclp_vt220_emit_current(void)
{
	unsigned long flags;
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (sclp_vt220_current_request) {
		sccb = (struct sclp_vt220_sccb *)
				sclp_vt220_current_request->sclp_req.sccb;
		/* Only emit buffers with content */
		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
			list_add_tail(&sclp_vt220_current_request->list,
				      &sclp_vt220_outqueue);
			sclp_vt220_current_request = NULL;
			if (timer_pending(&sclp_vt220_timer))
				del_timer(&sclp_vt220_timer);
		}
		sclp_vt220_flush_later = 0;
	}
	if (sclp_vt220_queue_running || sclp_vt220_suspended)
		goto out_unlock;
	if (list_empty(&sclp_vt220_outqueue))
		goto out_unlock;
	request = list_first_entry(&sclp_vt220_outqueue,
				   struct sclp_vt220_request, list);
	sclp_vt220_queue_running = 1;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);

	if (__sclp_vt220_emit(request))
		sclp_vt220_process_queue(request);
	return;
out_unlock:
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
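/* Note: while sclp_vt220_queue_running is set, an SCCB is being processed by
 * the SCLP and sclp_vt220_process_queue() keeps draining the outqueue from
 * the request callback, so sclp_vt220_emit_current() only queues the current
 * buffer in that case instead of starting another emit. */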
#define SCLP_NORMAL_WRITE	0x00

/*
 * Helper function to initialize a page with the sclp request structure.
 */
static struct sclp_vt220_request *
sclp_vt220_initialize_page(void *page)
{
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	/* Place request structure at end of page */
	request = ((struct sclp_vt220_request *)
			((addr_t) page + PAGE_SIZE)) - 1;
	request->retry_count = 0;
	request->sclp_req.sccb = page;
	/* SCCB goes at start of page */
	sccb = (struct sclp_vt220_sccb *) page;
	memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
	sccb->header.length = sizeof(struct sclp_vt220_sccb);
	sccb->header.function_code = SCLP_NORMAL_WRITE;
	sccb->header.response_code = 0x0000;
	sccb->evbuf.type = EVTYP_VT220MSG;
	sccb->evbuf.length = sizeof(struct evbuf_header);

	return request;
}
static inline unsigned int
sclp_vt220_space_left(struct sclp_vt220_request *request)
{
	struct sclp_vt220_sccb *sccb;
	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
	       sccb->header.length;
}
static inline unsigned int
sclp_vt220_chars_stored(struct sclp_vt220_request *request)
{
	struct sclp_vt220_sccb *sccb;
	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	return sccb->evbuf.length - sizeof(struct evbuf_header);
}
/*
 * Add msg to buffer associated with request. Return the number of characters
 * added.
 */
static int
sclp_vt220_add_msg(struct sclp_vt220_request *request,
		   const unsigned char *msg, int count, int convertlf)
{
	struct sclp_vt220_sccb *sccb;
	void *buffer;
	unsigned char c;
	int from;
	int to;

	if (count > sclp_vt220_space_left(request))
		count = sclp_vt220_space_left(request);
	if (count <= 0)
		return 0;
	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	buffer = (void *) ((addr_t) sccb + sccb->header.length);
	if (convertlf) {
		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
		for (from = 0, to = 0;
		     (from < count) && (to < sclp_vt220_space_left(request));
		     from++) {
			/* Retrieve character */
			c = msg[from];
			/* Perform conversion */
			if (c == 0x0a) {
				if (to + 1 < sclp_vt220_space_left(request)) {
					((unsigned char *) buffer)[to++] = c;
					((unsigned char *) buffer)[to++] = 0x0d;
				} else
					break;
			} else
				((unsigned char *) buffer)[to++] = c;
		}
		sccb->header.length += to;
		sccb->evbuf.length += to;
		return from;
	} else {
		memcpy(buffer, (const void *) msg, count);
		sccb->header.length += count;
		sccb->evbuf.length += count;
		return count;
	}
}
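/* Example: with convertlf enabled, the three input characters "a\nb" are
 * stored as the four characters 0x61 0x0a 0x0d 0x62 ("a\n\rb") and the
 * function returns 3, the number of input characters consumed. */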
/*
 * Emit buffer after having waited long enough for more data to arrive.
 */
static void
sclp_vt220_timeout(unsigned long data)
{
	sclp_vt220_emit_current();
}

#define BUFFER_MAX_DELAY	HZ/20
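/* HZ/20 jiffies correspond to 50ms - output is delayed by at most this long
 * to give subsequent writes a chance to be merged into one buffer. */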
/*
 * Internal implementation of the write function. Write COUNT bytes of data
 * from buffer BUF to the SCLP interface. In case that the data does not fit
 * into the current write buffer, emit the current one and allocate a new
 * one. If there are no more empty buffers available, wait until one gets
 * emptied. If DO_SCHEDULE is non-zero, the buffer will be scheduled for
 * emitting after a timeout - otherwise the user has to explicitly call the
 * flush function. A non-zero CONVERTLF parameter indicates that 0x0a
 * characters in the message buffer should be converted to 0x0a 0x0d. After
 * completion, return the number of bytes written.
 */
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
		   int convertlf, int may_fail)
{
	unsigned long flags;
	void *page;
	int written;
	int overall_written;

	if (count <= 0)
		return 0;
	overall_written = 0;
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	do {
		/* Create an sclp output buffer if none exists yet */
		if (sclp_vt220_current_request == NULL) {
			while (list_empty(&sclp_vt220_empty)) {
				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
				if (may_fail || sclp_vt220_suspended)
					goto out;
				else
					sclp_sync_wait();
				spin_lock_irqsave(&sclp_vt220_lock, flags);
			}
			page = (void *) sclp_vt220_empty.next;
			list_del((struct list_head *) page);
			sclp_vt220_current_request =
				sclp_vt220_initialize_page(page);
		}
		/* Try to write the string to the current request buffer */
		written = sclp_vt220_add_msg(sclp_vt220_current_request,
					     buf, count, convertlf);
		overall_written += written;
		if (written == count)
			break;
		/*
		 * Not all characters could be written to the current
		 * output buffer. Emit the buffer, create a new buffer
		 * and then output the rest of the string.
		 */
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_vt220_emit_current();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		buf += written;
		count -= written;
	} while (count > 0);
	/* Setup timer to output current console buffer after some time */
	if (sclp_vt220_current_request != NULL &&
	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
		sclp_vt220_timer.function = sclp_vt220_timeout;
		sclp_vt220_timer.data = 0UL;
		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
		add_timer(&sclp_vt220_timer);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
out:
	return overall_written;
}
/*
 * This routine is called by the kernel to write a series of
 * characters to the tty device. The characters may come from
 * user space or kernel space. This routine will return the
 * number of characters actually accepted for writing.
 */
static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	return __sclp_vt220_write(buf, count, 1, 0, 1);
}
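/* tty writes are scheduled for delayed emission (do_schedule=1), are not
 * subject to linefeed conversion (convertlf=0) and may return a short count
 * rather than block when no empty buffer is available (may_fail=1). */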
#define SCLP_VT220_SESSION_ENDED	0x01
#define SCLP_VT220_SESSION_STARTED	0x80
#define SCLP_VT220_SESSION_DATA		0x00
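/* The first byte of an inbound VT220 event buffer identifies the session
 * state; only SCLP_VT220_SESSION_DATA buffers carry terminal input that is
 * forwarded to the tty. */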
/*
 * Called by the SCLP to report incoming event buffers.
 */
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
	struct tty_struct *tty = tty_port_tty_get(&sclp_vt220_port);
	char *buffer;
	unsigned int count;

	/* Ignore input if device is not open */
	if (tty == NULL)
		return;

	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
	count = evbuf->length - sizeof(struct evbuf_header);

	switch (*buffer) {
	case SCLP_VT220_SESSION_ENDED:
	case SCLP_VT220_SESSION_STARTED:
		break;
	case SCLP_VT220_SESSION_DATA:
		/* Send input to line discipline */
		buffer++;
		count--;
		tty_insert_flip_string(tty, buffer, count);
		tty_flip_buffer_push(tty);
		break;
	}
	tty_kref_put(tty);
}
/*
 * This routine is called when a particular tty device is opened.
 */
static int
sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
	if (tty->count == 1) {
		tty_port_tty_set(&sclp_vt220_port, tty);
		tty->low_latency = 0;
		if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
			tty->winsize.ws_row = 24;
			tty->winsize.ws_col = 80;
		}
	}
	return 0;
}
/*
 * This routine is called when a particular tty device is closed.
 */
static void
sclp_vt220_close(struct tty_struct *tty, struct file *filp)
{
	if (tty->count == 1)
		tty_port_tty_set(&sclp_vt220_port, NULL);
}
/*
 * This routine is called by the kernel to write a single
 * character to the tty device. If the kernel uses this routine,
 * it must call the flush_chars() routine (if defined) when it is
 * done stuffing characters into the driver.
 */
static int
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}
/*
 * This routine is called by the kernel after it has written a
 * series of characters to the tty device using put_char().
 */
static void
sclp_vt220_flush_chars(struct tty_struct *tty)
{
	if (!sclp_vt220_queue_running)
		sclp_vt220_emit_current();
	else
		sclp_vt220_flush_later = 1;
}
/*
 * This routine returns the numbers of characters the tty driver
 * will accept for queuing to be written. This number is subject
 * to change as output buffers get emptied, or if the output flow
 * control is acted.
 */
static int
sclp_vt220_write_room(struct tty_struct *tty)
{
	unsigned long flags;
	struct list_head *l;
	int count;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	count = 0;
	if (sclp_vt220_current_request != NULL)
		count = sclp_vt220_space_left(sclp_vt220_current_request);
	list_for_each(l, &sclp_vt220_empty)
		count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return count;
}
/*
 * Return number of buffered chars.
 */
static int
sclp_vt220_chars_in_buffer(struct tty_struct *tty)
{
	unsigned long flags;
	struct list_head *l;
	struct sclp_vt220_request *r;
	int count;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	count = 0;
	if (sclp_vt220_current_request != NULL)
		count = sclp_vt220_chars_stored(sclp_vt220_current_request);
	list_for_each(l, &sclp_vt220_outqueue) {
		r = list_entry(l, struct sclp_vt220_request, list);
		count += sclp_vt220_chars_stored(r);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return count;
}
/*
 * Pass on all buffers to the hardware. Return only when there are no more
 * buffers pending.
 */
static void
sclp_vt220_flush_buffer(struct tty_struct *tty)
{
	sclp_vt220_emit_current();
}
/* Release allocated pages. */
static void __init __sclp_vt220_free_pages(void)
{
	struct list_head *page, *p;

	list_for_each_safe(page, p, &sclp_vt220_empty) {
		list_del(page);
		free_page((unsigned long) page);
	}
}
/* Release memory and unregister from sclp core. Controlled by init counting -
 * only the last invoker will actually perform these actions. */
static void __init __sclp_vt220_cleanup(void)
{
	sclp_vt220_init_count--;
	if (sclp_vt220_init_count != 0)
		return;
	sclp_unregister(&sclp_vt220_register);
	__sclp_vt220_free_pages();
}
/* Allocate buffer pages and register with sclp core. Controlled by init
 * counting - only the first invoker will actually perform these actions. */
static int __init __sclp_vt220_init(int num_pages)
{
	void *page;
	int i;
	int rc;

	sclp_vt220_init_count++;
	if (sclp_vt220_init_count != 1)
		return 0;
	spin_lock_init(&sclp_vt220_lock);
	INIT_LIST_HEAD(&sclp_vt220_empty);
	INIT_LIST_HEAD(&sclp_vt220_outqueue);
	init_timer(&sclp_vt220_timer);
	tty_port_init(&sclp_vt220_port);
	sclp_vt220_current_request = NULL;
	sclp_vt220_buffered_chars = 0;
	sclp_vt220_flush_later = 0;

	/* Allocate pages for output buffering */
	rc = -ENOMEM;
	for (i = 0; i < num_pages; i++) {
		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!page)
			goto out;
		list_add_tail(page, &sclp_vt220_empty);
	}
	rc = sclp_register(&sclp_vt220_register);
out:
	if (rc) {
		__sclp_vt220_free_pages();
		sclp_vt220_init_count--;
	}
	return rc;
}
static const struct tty_operations sclp_vt220_ops = {
	.open = sclp_vt220_open,
	.close = sclp_vt220_close,
	.write = sclp_vt220_write,
	.put_char = sclp_vt220_put_char,
	.flush_chars = sclp_vt220_flush_chars,
	.write_room = sclp_vt220_write_room,
	.chars_in_buffer = sclp_vt220_chars_in_buffer,
	.flush_buffer = sclp_vt220_flush_buffer,
};
/*
 * Register driver with SCLP and Linux and initialize internal tty structures.
 */
static int __init sclp_vt220_tty_init(void)
{
	struct tty_driver *driver;
	int rc;

	/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
	 * symmetry between VM and LPAR systems regarding ttyS1. */
	driver = alloc_tty_driver(1);
	if (!driver)
		return -ENOMEM;
	rc = __sclp_vt220_init(MAX_KMEM_PAGES);
	if (rc)
		goto out_driver;

	driver->driver_name = SCLP_VT220_DRIVER_NAME;
	driver->name = SCLP_VT220_DEVICE_NAME;
	driver->major = SCLP_VT220_MAJOR;
	driver->minor_start = SCLP_VT220_MINOR;
	driver->type = TTY_DRIVER_TYPE_SYSTEM;
	driver->subtype = SYSTEM_TYPE_TTY;
	driver->init_termios = tty_std_termios;
	driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(driver, &sclp_vt220_ops);

	rc = tty_register_driver(driver);
	if (rc)
		goto out_init;
	sclp_vt220_driver = driver;
	return 0;

out_init:
	__sclp_vt220_cleanup();
out_driver:
	put_tty_driver(driver);
	return rc;
}
__initcall(sclp_vt220_tty_init);
static void __sclp_vt220_flush_buffer(void)
{
	unsigned long flags;

	sclp_vt220_emit_current();
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (timer_pending(&sclp_vt220_timer))
		del_timer(&sclp_vt220_timer);
	while (sclp_vt220_queue_running) {
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_sync_wait();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
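/* __sclp_vt220_flush_buffer() returns only after the output queue has gone
 * idle, which makes it suitable to call right before suspend, reboot or
 * panic handling. */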
/*
 * Resume console: If there are cached messages, emit them.
 */
static void sclp_vt220_resume(void)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	sclp_vt220_suspended = 0;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	sclp_vt220_emit_current();
}
/*
 * Suspend console: Set suspend flag and flush console
 */
static void sclp_vt220_suspend(void)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	sclp_vt220_suspended = 1;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	__sclp_vt220_flush_buffer();
}
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
				   enum sclp_pm_event sclp_pm_event)
{
	switch (sclp_pm_event) {
	case SCLP_PM_EVENT_FREEZE:
		sclp_vt220_suspend();
		break;
	case SCLP_PM_EVENT_RESTORE:
	case SCLP_PM_EVENT_THAW:
		sclp_vt220_resume();
		break;
	}
}
#ifdef CONFIG_SCLP_VT220_CONSOLE

static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}
static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
	*index = 0;
	return sclp_vt220_driver;
}
static int
sclp_vt220_notify(struct notifier_block *self,
		  unsigned long event, void *data)
{
	__sclp_vt220_flush_buffer();
	return NOTIFY_OK;
}

static struct notifier_block on_panic_nb = {
	.notifier_call = sclp_vt220_notify,
	.priority = 1,
};

static struct notifier_block on_reboot_nb = {
	.notifier_call = sclp_vt220_notify,
	.priority = 1,
};
/* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
	.name = SCLP_VT220_CONSOLE_NAME,
	.write = sclp_vt220_con_write,
	.device = sclp_vt220_con_device,
	.flags = CON_PRINTBUFFER,
	.index = SCLP_VT220_CONSOLE_INDEX
};
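/* CON_PRINTBUFFER causes messages already in the kernel log buffer to be
 * replayed on this console when it is registered. */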
static int __init
sclp_vt220_con_init(void)
{
	int rc;

	if (!CONSOLE_IS_SCLP)
		return 0;
	rc = __sclp_vt220_init(MAX_CONSOLE_PAGES);
	if (rc)
		return rc;
	/* Attach linux console */
	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
	register_reboot_notifier(&on_reboot_nb);
	register_console(&sclp_vt220_console);
	return 0;
}

console_initcall(sclp_vt220_con_init);
#endif /* CONFIG_SCLP_VT220_CONSOLE */