/*
 * Tty buffer allocation management
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
21 * tty_buffer_free_all - free buffers used by a tty
22 * @tty: tty to free from
24 * Remove all the buffers pending on a tty whether queued with data
25 * or in the free ring. Must be called when the tty is no longer in use
30 void tty_buffer_free_all(struct tty_struct *tty)
32 struct tty_bufhead *buf = &tty->buf;
33 struct tty_buffer *thead;
35 while ((thead = buf->head) != NULL) {
36 buf->head = thead->next;
39 while ((thead = buf->free) != NULL) {
40 buf->free = thead->next;
48 * tty_buffer_alloc - allocate a tty buffer
50 * @size: desired size (characters)
52 * Allocate a new tty buffer to hold the desired number of characters.
53 * Return NULL if out of memory or the allocation would exceed the
56 * Locking: Caller must hold tty->buf.lock
59 static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
63 if (tty->buf.memory_used + size > 65536)
65 p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
73 p->char_buf_ptr = (char *)(p->data);
74 p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
75 tty->buf.memory_used += size;
80 * tty_buffer_free - free a tty buffer
81 * @tty: tty owning the buffer
82 * @b: the buffer to free
84 * Free a tty buffer, or add it to the free list according to our
87 * Locking: Caller must hold tty->buf.lock
90 static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
92 struct tty_bufhead *buf = &tty->buf;
94 /* Dumb strategy for now - should keep some stats */
95 buf->memory_used -= b->size;
96 WARN_ON(buf->memory_used < 0);
107 * __tty_buffer_flush - flush full tty buffers
110 * flush all the buffers containing receive data. Caller must
111 * hold the buffer lock and must have ensured no parallel flush to
114 * Locking: Caller must hold tty->buf.lock
117 static void __tty_buffer_flush(struct tty_struct *tty)
119 struct tty_bufhead *buf = &tty->buf;
120 struct tty_buffer *thead;
122 while ((thead = buf->head) != NULL) {
123 buf->head = thead->next;
124 tty_buffer_free(tty, thead);
130 * tty_buffer_flush - flush full tty buffers
133 * flush all the buffers containing receive data. If the buffer is
134 * being processed by flush_to_ldisc then we defer the processing
140 void tty_buffer_flush(struct tty_struct *tty)
142 struct tty_port *port = tty->port;
143 struct tty_bufhead *buf = &tty->buf;
146 spin_lock_irqsave(&buf->lock, flags);
148 /* If the data is being pushed to the tty layer then we can't
149 process it here. Instead set a flag and the flush_to_ldisc
150 path will process the flush request before it exits */
151 if (test_bit(TTYP_FLUSHING, &port->iflags)) {
152 set_bit(TTYP_FLUSHPENDING, &port->iflags);
153 spin_unlock_irqrestore(&buf->lock, flags);
154 wait_event(tty->read_wait,
155 test_bit(TTYP_FLUSHPENDING, &port->iflags) == 0);
158 __tty_buffer_flush(tty);
159 spin_unlock_irqrestore(&buf->lock, flags);
163 * tty_buffer_find - find a free tty buffer
164 * @tty: tty owning the buffer
165 * @size: characters wanted
167 * Locate an existing suitable tty buffer or if we are lacking one then
168 * allocate a new one. We round our buffers off in 256 character chunks
169 * to get better allocation behaviour.
171 * Locking: Caller must hold tty->buf.lock
174 static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
176 struct tty_buffer **tbh = &tty->buf.free;
177 while ((*tbh) != NULL) {
178 struct tty_buffer *t = *tbh;
179 if (t->size >= size) {
185 tty->buf.memory_used += t->size;
188 tbh = &((*tbh)->next);
190 /* Round the buffer size out */
191 size = (size + 0xFF) & ~0xFF;
192 return tty_buffer_alloc(tty, size);
193 /* Should possibly check if this fails for the largest buffer we
194 have queued and recycle that ? */
197 * __tty_buffer_request_room - grow tty buffer if needed
198 * @tty: tty structure
199 * @size: size desired
201 * Make at least size bytes of linear space available for the tty
202 * buffer. If we fail return the size we managed to find.
203 * Locking: Caller must hold tty->buf.lock
205 static int __tty_buffer_request_room(struct tty_struct *tty, size_t size)
207 struct tty_bufhead *buf = &tty->buf;
208 struct tty_buffer *b, *n;
210 /* OPTIMISATION: We could keep a per tty "zero" sized buffer to
211 remove this conditional if its worth it. This would be invisible
215 left = b->size - b->used;
220 /* This is the slow path - looking for new buffers to use */
221 if ((n = tty_buffer_find(tty, size)) != NULL) {
237 * tty_buffer_request_room - grow tty buffer if needed
238 * @tty: tty structure
239 * @size: size desired
241 * Make at least size bytes of linear space available for the tty
242 * buffer. If we fail return the size we managed to find.
244 * Locking: Takes tty->buf.lock
246 int tty_buffer_request_room(struct tty_struct *tty, size_t size)
251 spin_lock_irqsave(&tty->buf.lock, flags);
252 length = __tty_buffer_request_room(tty, size);
253 spin_unlock_irqrestore(&tty->buf.lock, flags);
256 EXPORT_SYMBOL_GPL(tty_buffer_request_room);
259 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
260 * @tty: tty structure
262 * @flag: flag value for each character
265 * Queue a series of bytes to the tty buffering. All the characters
266 * passed are marked with the supplied flag. Returns the number added.
268 * Locking: Called functions may take tty->buf.lock
271 int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
272 const unsigned char *chars, char flag, size_t size)
274 struct tty_bufhead *buf = &tty->buf;
277 int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
280 struct tty_buffer *tb;
282 spin_lock_irqsave(&buf->lock, flags);
283 space = __tty_buffer_request_room(tty, goal);
285 /* If there is no space then tb may be NULL */
286 if (unlikely(space == 0)) {
287 spin_unlock_irqrestore(&buf->lock, flags);
290 memcpy(tb->char_buf_ptr + tb->used, chars, space);
291 memset(tb->flag_buf_ptr + tb->used, flag, space);
293 spin_unlock_irqrestore(&buf->lock, flags);
296 /* There is a small chance that we need to split the data over
297 several buffers. If this is the case we must loop */
298 } while (unlikely(size > copied));
301 EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
304 * tty_insert_flip_string_flags - Add characters to the tty buffer
305 * @tty: tty structure
310 * Queue a series of bytes to the tty buffering. For each character
311 * the flags array indicates the status of the character. Returns the
314 * Locking: Called functions may take tty->buf.lock
317 int tty_insert_flip_string_flags(struct tty_struct *tty,
318 const unsigned char *chars, const char *flags, size_t size)
320 struct tty_bufhead *buf = &tty->buf;
323 int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
325 unsigned long __flags;
326 struct tty_buffer *tb;
328 spin_lock_irqsave(&buf->lock, __flags);
329 space = __tty_buffer_request_room(tty, goal);
331 /* If there is no space then tb may be NULL */
332 if (unlikely(space == 0)) {
333 spin_unlock_irqrestore(&buf->lock, __flags);
336 memcpy(tb->char_buf_ptr + tb->used, chars, space);
337 memcpy(tb->flag_buf_ptr + tb->used, flags, space);
339 spin_unlock_irqrestore(&buf->lock, __flags);
343 /* There is a small chance that we need to split the data over
344 several buffers. If this is the case we must loop */
345 } while (unlikely(size > copied));
348 EXPORT_SYMBOL(tty_insert_flip_string_flags);
351 * tty_schedule_flip - push characters to ldisc
352 * @tty: tty to push from
354 * Takes any pending buffers and transfers their ownership to the
355 * ldisc side of the queue. It then schedules those characters for
356 * processing by the line discipline.
357 * Note that this function can only be used when the low_latency flag
358 * is unset. Otherwise the workqueue won't be flushed.
360 * Locking: Takes tty->buf.lock
363 void tty_schedule_flip(struct tty_struct *tty)
365 struct tty_bufhead *buf = &tty->buf;
368 spin_lock_irqsave(&buf->lock, flags);
369 if (buf->tail != NULL)
370 buf->tail->commit = buf->tail->used;
371 spin_unlock_irqrestore(&buf->lock, flags);
372 schedule_work(&buf->work);
374 EXPORT_SYMBOL(tty_schedule_flip);
377 * tty_prepare_flip_string - make room for characters
379 * @chars: return pointer for character write area
380 * @size: desired size
382 * Prepare a block of space in the buffer for data. Returns the length
383 * available and buffer pointer to the space which is now allocated and
384 * accounted for as ready for normal characters. This is used for drivers
385 * that need their own block copy routines into the buffer. There is no
386 * guarantee the buffer is a DMA target!
388 * Locking: May call functions taking tty->buf.lock
391 int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
394 struct tty_bufhead *buf = &tty->buf;
397 struct tty_buffer *tb;
399 spin_lock_irqsave(&buf->lock, flags);
400 space = __tty_buffer_request_room(tty, size);
404 *chars = tb->char_buf_ptr + tb->used;
405 memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
408 spin_unlock_irqrestore(&buf->lock, flags);
411 EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
414 * tty_prepare_flip_string_flags - make room for characters
416 * @chars: return pointer for character write area
417 * @flags: return pointer for status flag write area
418 * @size: desired size
420 * Prepare a block of space in the buffer for data. Returns the length
421 * available and buffer pointer to the space which is now allocated and
422 * accounted for as ready for characters. This is used for drivers
423 * that need their own block copy routines into the buffer. There is no
424 * guarantee the buffer is a DMA target!
426 * Locking: May call functions taking tty->buf.lock
429 int tty_prepare_flip_string_flags(struct tty_struct *tty,
430 unsigned char **chars, char **flags, size_t size)
432 struct tty_bufhead *buf = &tty->buf;
434 unsigned long __flags;
435 struct tty_buffer *tb;
437 spin_lock_irqsave(&buf->lock, __flags);
438 space = __tty_buffer_request_room(tty, size);
442 *chars = tb->char_buf_ptr + tb->used;
443 *flags = tb->flag_buf_ptr + tb->used;
446 spin_unlock_irqrestore(&buf->lock, __flags);
449 EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
455 * @work: tty structure passed from work queue.
457 * This routine is called out of the software interrupt to flush data
458 * from the buffer chain to the line discipline.
460 * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
461 * while invoking the line discipline receive_buf method. The
462 * receive_buf method is single threaded for each tty instance.
465 static void flush_to_ldisc(struct work_struct *work)
467 struct tty_struct *tty =
468 container_of(work, struct tty_struct, buf.work);
469 struct tty_port *port = tty->port;
470 struct tty_bufhead *buf = &tty->buf;
472 struct tty_ldisc *disc;
474 disc = tty_ldisc_ref(tty);
475 if (disc == NULL) /* !TTY_LDISC */
478 spin_lock_irqsave(&buf->lock, flags);
480 if (!test_and_set_bit(TTYP_FLUSHING, &port->iflags)) {
481 struct tty_buffer *head;
482 while ((head = buf->head) != NULL) {
485 unsigned char *flag_buf;
487 count = head->commit - head->read;
489 if (head->next == NULL)
491 buf->head = head->next;
492 tty_buffer_free(tty, head);
495 /* Ldisc or user is trying to flush the buffers
496 we are feeding to the ldisc, stop feeding the
497 line discipline as we want to empty the queue */
498 if (test_bit(TTYP_FLUSHPENDING, &port->iflags))
500 if (!tty->receive_room)
502 if (count > tty->receive_room)
503 count = tty->receive_room;
504 char_buf = head->char_buf_ptr + head->read;
505 flag_buf = head->flag_buf_ptr + head->read;
507 spin_unlock_irqrestore(&buf->lock, flags);
508 disc->ops->receive_buf(tty, char_buf,
510 spin_lock_irqsave(&buf->lock, flags);
512 clear_bit(TTYP_FLUSHING, &port->iflags);
515 /* We may have a deferred request to flush the input buffer,
516 if so pull the chain under the lock and empty the queue */
517 if (test_bit(TTYP_FLUSHPENDING, &port->iflags)) {
518 __tty_buffer_flush(tty);
519 clear_bit(TTYP_FLUSHPENDING, &port->iflags);
520 wake_up(&tty->read_wait);
522 spin_unlock_irqrestore(&buf->lock, flags);
524 tty_ldisc_deref(disc);
531 * Push the terminal flip buffers to the line discipline.
533 * Must not be called from IRQ context.
535 void tty_flush_to_ldisc(struct tty_struct *tty)
537 if (!tty->low_latency)
538 flush_work(&tty->buf.work);
542 * tty_flip_buffer_push - terminal
545 * Queue a push of the terminal flip buffers to the line discipline. This
546 * function must not be called from IRQ context if tty->low_latency is set.
548 * In the event of the queue being busy for flipping the work will be
549 * held off and retried later.
551 * Locking: tty buffer lock. Driver locks in low latency mode.
554 void tty_flip_buffer_push(struct tty_struct *tty)
556 struct tty_bufhead *buf = &tty->buf;
559 spin_lock_irqsave(&buf->lock, flags);
560 if (buf->tail != NULL)
561 buf->tail->commit = buf->tail->used;
562 spin_unlock_irqrestore(&buf->lock, flags);
564 if (tty->low_latency)
565 flush_to_ldisc(&buf->work);
567 schedule_work(&buf->work);
569 EXPORT_SYMBOL(tty_flip_buffer_push);
572 * tty_buffer_init - prepare a tty buffer structure
573 * @tty: tty to initialise
575 * Set up the initial state of the buffer management for a tty device.
576 * Must be called before the other tty buffer functions are used.
581 void tty_buffer_init(struct tty_struct *tty)
583 struct tty_bufhead *buf = &tty->buf;
585 spin_lock_init(&buf->lock);
589 buf->memory_used = 0;
590 INIT_WORK(&buf->work, flush_to_ldisc);