Diffstat (limited to 'drivers/tty/tty_buffer.c')
-rw-r--r-- | drivers/tty/tty_buffer.c | 519
1 file changed, 262 insertions, 257 deletions
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index d8210ca00720..c043136fbe51 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -16,6 +16,91 @@ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/module.h> +#include <linux/ratelimit.h> + + +#define MIN_TTYB_SIZE 256 +#define TTYB_ALIGN_MASK 255 + +/* + * Byte threshold to limit memory consumption for flip buffers. + * The actual memory limit is > 2x this amount. + */ +#define TTYB_MEM_LIMIT 65536 + +/* + * We default to dicing tty buffer allocations to this many characters + * in order to avoid multiple page allocations. We know the size of + * tty_buffer itself but it must also be taken into account that the + * the buffer is 256 byte aligned. See tty_buffer_find for the allocation + * logic this must match + */ + +#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF) + + +/** + * tty_buffer_lock_exclusive - gain exclusive access to buffer + * tty_buffer_unlock_exclusive - release exclusive access + * + * @port - tty_port owning the flip buffer + * + * Guarantees safe use of the line discipline's receive_buf() method by + * excluding the buffer work and any pending flush from using the flip + * buffer. Data can continue to be added concurrently to the flip buffer + * from the driver side. + * + * On release, the buffer work is restarted if there is data in the + * flip buffer + */ + +void tty_buffer_lock_exclusive(struct tty_port *port) +{ + struct tty_bufhead *buf = &port->buf; + + atomic_inc(&buf->priority); + mutex_lock(&buf->lock); +} + +void tty_buffer_unlock_exclusive(struct tty_port *port) +{ + struct tty_bufhead *buf = &port->buf; + int restart; + + restart = buf->head->commit != buf->head->read; + + atomic_dec(&buf->priority); + mutex_unlock(&buf->lock); + if (restart) + queue_work(system_unbound_wq, &buf->work); +} + +/** + * tty_buffer_space_avail - return unused buffer space + * @port - tty_port owning the flip buffer + * + * Returns the # of bytes which can be written by the driver without + * reaching the buffer limit. + * + * Note: this does not guarantee that memory is available to write + * the returned # of bytes (use tty_prepare_flip_string_xxx() to + * pre-allocate if memory guarantee is required). + */ + +int tty_buffer_space_avail(struct tty_port *port) +{ + int space = TTYB_MEM_LIMIT - atomic_read(&port->buf.memory_used); + return max(space, 0); +} + +static void tty_buffer_reset(struct tty_buffer *p, size_t size) +{ + p->used = 0; + p->size = size; + p->next = NULL; + p->commit = 0; + p->read = 0; +} /** * tty_buffer_free_all - free buffers used by a tty @@ -23,23 +108,28 @@ * * Remove all the buffers pending on a tty whether queued with data * or in the free ring. 
Must be called when the tty is no longer in use - * - * Locking: none */ -void tty_buffer_free_all(struct tty_struct *tty) +void tty_buffer_free_all(struct tty_port *port) { - struct tty_buffer *thead; - while ((thead = tty->buf.head) != NULL) { - tty->buf.head = thead->next; - kfree(thead); + struct tty_bufhead *buf = &port->buf; + struct tty_buffer *p, *next; + struct llist_node *llist; + + while ((p = buf->head) != NULL) { + buf->head = p->next; + if (p->size > 0) + kfree(p); } - while ((thead = tty->buf.free) != NULL) { - tty->buf.free = thead->next; - kfree(thead); - } - tty->buf.tail = NULL; - tty->buf.memory_used = 0; + llist = llist_del_all(&buf->free); + llist_for_each_entry_safe(p, next, llist, free) + kfree(p); + + tty_buffer_reset(&buf->sentinel, 0); + buf->head = &buf->sentinel; + buf->tail = &buf->sentinel; + + atomic_set(&buf->memory_used, 0); } /** @@ -48,29 +138,39 @@ void tty_buffer_free_all(struct tty_struct *tty) * @size: desired size (characters) * * Allocate a new tty buffer to hold the desired number of characters. + * We round our buffers off in 256 character chunks to get better + * allocation behaviour. * Return NULL if out of memory or the allocation would exceed the * per device queue - * - * Locking: Caller must hold tty->buf.lock */ -static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size) +static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size) { + struct llist_node *free; struct tty_buffer *p; - if (tty->buf.memory_used + size > 65536) + /* Round the buffer size out */ + size = __ALIGN_MASK(size, TTYB_ALIGN_MASK); + + if (size <= MIN_TTYB_SIZE) { + free = llist_del_first(&port->buf.free); + if (free) { + p = llist_entry(free, struct tty_buffer, free); + goto found; + } + } + + /* Should possibly check if this fails for the largest buffer we + have queued and recycle that ? */ + if (atomic_read(&port->buf.memory_used) > TTYB_MEM_LIMIT) return NULL; p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); if (p == NULL) return NULL; - p->used = 0; - p->size = size; - p->next = NULL; - p->commit = 0; - p->read = 0; - p->char_buf_ptr = (char *)(p->data); - p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size; - tty->buf.memory_used += size; + +found: + tty_buffer_reset(p, size); + atomic_add(size, &port->buf.memory_used); return p; } @@ -81,44 +181,19 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size) * * Free a tty buffer, or add it to the free list according to our * internal strategy - * - * Locking: Caller must hold tty->buf.lock */ -static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b) +static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b) { + struct tty_bufhead *buf = &port->buf; + /* Dumb strategy for now - should keep some stats */ - tty->buf.memory_used -= b->size; - WARN_ON(tty->buf.memory_used < 0); + WARN_ON(atomic_sub_return(b->size, &buf->memory_used) < 0); - if (b->size >= 512) + if (b->size > MIN_TTYB_SIZE) kfree(b); - else { - b->next = tty->buf.free; - tty->buf.free = b; - } -} - -/** - * __tty_buffer_flush - flush full tty buffers - * @tty: tty to flush - * - * flush all the buffers containing receive data. Caller must - * hold the buffer lock and must have ensured no parallel flush to - * ldisc is running. 
- * - * Locking: Caller must hold tty->buf.lock - */ - -static void __tty_buffer_flush(struct tty_struct *tty) -{ - struct tty_buffer *thead; - - while ((thead = tty->buf.head) != NULL) { - tty->buf.head = thead->next; - tty_buffer_free(tty, thead); - } - tty->buf.tail = NULL; + else if (b->size > 0) + llist_add(&b->free, &buf->free); } /** @@ -129,61 +204,26 @@ static void __tty_buffer_flush(struct tty_struct *tty) * being processed by flush_to_ldisc then we defer the processing * to that function * - * Locking: none + * Locking: takes buffer lock to ensure single-threaded flip buffer + * 'consumer' */ void tty_buffer_flush(struct tty_struct *tty) { - unsigned long flags; - spin_lock_irqsave(&tty->buf.lock, flags); - - /* If the data is being pushed to the tty layer then we can't - process it here. Instead set a flag and the flush_to_ldisc - path will process the flush request before it exits */ - if (test_bit(TTY_FLUSHING, &tty->flags)) { - set_bit(TTY_FLUSHPENDING, &tty->flags); - spin_unlock_irqrestore(&tty->buf.lock, flags); - wait_event(tty->read_wait, - test_bit(TTY_FLUSHPENDING, &tty->flags) == 0); - return; - } else - __tty_buffer_flush(tty); - spin_unlock_irqrestore(&tty->buf.lock, flags); -} + struct tty_port *port = tty->port; + struct tty_bufhead *buf = &port->buf; + struct tty_buffer *next; -/** - * tty_buffer_find - find a free tty buffer - * @tty: tty owning the buffer - * @size: characters wanted - * - * Locate an existing suitable tty buffer or if we are lacking one then - * allocate a new one. We round our buffers off in 256 character chunks - * to get better allocation behaviour. - * - * Locking: Caller must hold tty->buf.lock - */ + atomic_inc(&buf->priority); -static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) -{ - struct tty_buffer **tbh = &tty->buf.free; - while ((*tbh) != NULL) { - struct tty_buffer *t = *tbh; - if (t->size >= size) { - *tbh = t->next; - t->next = NULL; - t->used = 0; - t->commit = 0; - t->read = 0; - tty->buf.memory_used += t->size; - return t; - } - tbh = &((*tbh)->next); + mutex_lock(&buf->lock); + while ((next = buf->head->next) != NULL) { + tty_buffer_free(port, buf->head); + buf->head = next; } - /* Round the buffer size out */ - size = (size + 0xFF) & ~0xFF; - return tty_buffer_alloc(tty, size); - /* Should possibly check if this fails for the largest buffer we - have queued and recycle that ? */ + buf->head->read = buf->head->commit; + atomic_dec(&buf->priority); + mutex_unlock(&buf->lock); } /** @@ -193,69 +233,53 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) * * Make at least size bytes of linear space available for the tty * buffer. If we fail return the size we managed to find. - * - * Locking: Takes tty->buf.lock */ -int tty_buffer_request_room(struct tty_struct *tty, size_t size) +int tty_buffer_request_room(struct tty_port *port, size_t size) { + struct tty_bufhead *buf = &port->buf; struct tty_buffer *b, *n; int left; - unsigned long flags; - spin_lock_irqsave(&tty->buf.lock, flags); - - /* OPTIMISATION: We could keep a per tty "zero" sized buffer to - remove this conditional if its worth it. 
This would be invisible - to the callers */ - if ((b = tty->buf.tail) != NULL) - left = b->size - b->used; - else - left = 0; + b = buf->tail; + left = b->size - b->used; if (left < size) { /* This is the slow path - looking for new buffers to use */ - if ((n = tty_buffer_find(tty, size)) != NULL) { - if (b != NULL) { - b->next = n; - b->commit = b->used; - } else - tty->buf.head = n; - tty->buf.tail = n; + if ((n = tty_buffer_alloc(port, size)) != NULL) { + buf->tail = n; + b->commit = b->used; + smp_mb(); + b->next = n; } else size = left; } - - spin_unlock_irqrestore(&tty->buf.lock, flags); return size; } EXPORT_SYMBOL_GPL(tty_buffer_request_room); /** * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer - * @tty: tty structure + * @port: tty port * @chars: characters * @flag: flag value for each character * @size: size * * Queue a series of bytes to the tty buffering. All the characters * passed are marked with the supplied flag. Returns the number added. - * - * Locking: Called functions may take tty->buf.lock */ -int tty_insert_flip_string_fixed_flag(struct tty_struct *tty, +int tty_insert_flip_string_fixed_flag(struct tty_port *port, const unsigned char *chars, char flag, size_t size) { int copied = 0; do { int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE); - int space = tty_buffer_request_room(tty, goal); - struct tty_buffer *tb = tty->buf.tail; - /* If there is no space then tb may be NULL */ + int space = tty_buffer_request_room(port, goal); + struct tty_buffer *tb = port->buf.tail; if (unlikely(space == 0)) break; - memcpy(tb->char_buf_ptr + tb->used, chars, space); - memset(tb->flag_buf_ptr + tb->used, flag, space); + memcpy(char_buf_ptr(tb, tb->used), chars, space); + memset(flag_buf_ptr(tb, tb->used), flag, space); tb->used += space; copied += space; chars += space; @@ -268,7 +292,7 @@ EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag); /** * tty_insert_flip_string_flags - Add characters to the tty buffer - * @tty: tty structure + * @port: tty port * @chars: characters * @flags: flag bytes * @size: size @@ -276,23 +300,20 @@ EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag); * Queue a series of bytes to the tty buffering. For each character * the flags array indicates the status of the character. Returns the * number added. - * - * Locking: Called functions may take tty->buf.lock */ -int tty_insert_flip_string_flags(struct tty_struct *tty, +int tty_insert_flip_string_flags(struct tty_port *port, const unsigned char *chars, const char *flags, size_t size) { int copied = 0; do { int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE); - int space = tty_buffer_request_room(tty, goal); - struct tty_buffer *tb = tty->buf.tail; - /* If there is no space then tb may be NULL */ + int space = tty_buffer_request_room(port, goal); + struct tty_buffer *tb = port->buf.tail; if (unlikely(space == 0)) break; - memcpy(tb->char_buf_ptr + tb->used, chars, space); - memcpy(tb->flag_buf_ptr + tb->used, flags, space); + memcpy(char_buf_ptr(tb, tb->used), chars, space); + memcpy(flag_buf_ptr(tb, tb->used), flags, space); tb->used += space; copied += space; chars += space; @@ -306,29 +327,28 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags); /** * tty_schedule_flip - push characters to ldisc - * @tty: tty to push from + * @port: tty port to push from * * Takes any pending buffers and transfers their ownership to the * ldisc side of the queue. It then schedules those characters for * processing by the line discipline. 
- * - * Locking: Takes tty->buf.lock + * Note that this function can only be used when the low_latency flag + * is unset. Otherwise the workqueue won't be flushed. */ -void tty_schedule_flip(struct tty_struct *tty) +void tty_schedule_flip(struct tty_port *port) { - unsigned long flags; - spin_lock_irqsave(&tty->buf.lock, flags); - if (tty->buf.tail != NULL) - tty->buf.tail->commit = tty->buf.tail->used; - spin_unlock_irqrestore(&tty->buf.lock, flags); - schedule_delayed_work(&tty->buf.work, 1); + struct tty_bufhead *buf = &port->buf; + WARN_ON(port->low_latency); + + buf->tail->commit = buf->tail->used; + schedule_work(&buf->work); } EXPORT_SYMBOL(tty_schedule_flip); /** * tty_prepare_flip_string - make room for characters - * @tty: tty + * @port: tty port * @chars: return pointer for character write area * @size: desired size * @@ -337,18 +357,16 @@ EXPORT_SYMBOL(tty_schedule_flip); * accounted for as ready for normal characters. This is used for drivers * that need their own block copy routines into the buffer. There is no * guarantee the buffer is a DMA target! - * - * Locking: May call functions taking tty->buf.lock */ -int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, - size_t size) +int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars, + size_t size) { - int space = tty_buffer_request_room(tty, size); + int space = tty_buffer_request_room(port, size); if (likely(space)) { - struct tty_buffer *tb = tty->buf.tail; - *chars = tb->char_buf_ptr + tb->used; - memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space); + struct tty_buffer *tb = port->buf.tail; + *chars = char_buf_ptr(tb, tb->used); + memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space); tb->used += space; } return space; @@ -357,7 +375,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string); /** * tty_prepare_flip_string_flags - make room for characters - * @tty: tty + * @port: tty port * @chars: return pointer for character write area * @flags: return pointer for status flag write area * @size: desired size @@ -367,18 +385,16 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string); * accounted for as ready for characters. This is used for drivers * that need their own block copy routines into the buffer. There is no * guarantee the buffer is a DMA target! 
- * - * Locking: May call functions taking tty->buf.lock */ -int tty_prepare_flip_string_flags(struct tty_struct *tty, +int tty_prepare_flip_string_flags(struct tty_port *port, unsigned char **chars, char **flags, size_t size) { - int space = tty_buffer_request_room(tty, size); + int space = tty_buffer_request_room(port, size); if (likely(space)) { - struct tty_buffer *tb = tty->buf.tail; - *chars = tb->char_buf_ptr + tb->used; - *flags = tb->flag_buf_ptr + tb->used; + struct tty_buffer *tb = port->buf.tail; + *chars = char_buf_ptr(tb, tb->used); + *flags = flag_buf_ptr(tb, tb->used); tb->used += space; } return space; @@ -386,6 +402,23 @@ int tty_prepare_flip_string_flags(struct tty_struct *tty, EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags); +static int +receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count) +{ + struct tty_ldisc *disc = tty->ldisc; + unsigned char *p = char_buf_ptr(head, head->read); + char *f = flag_buf_ptr(head, head->read); + + if (disc->ops->receive_buf2) + count = disc->ops->receive_buf2(tty, p, f, count); + else { + count = min_t(int, count, tty->receive_room); + if (count) + disc->ops->receive_buf(tty, p, f, count); + } + head->read += count; + return count; +} /** * flush_to_ldisc @@ -394,79 +427,52 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags); * This routine is called out of the software interrupt to flush data * from the buffer chain to the line discipline. * - * Locking: holds tty->buf.lock to guard buffer list. Drops the lock - * while invoking the line discipline receive_buf method. The - * receive_buf method is single threaded for each tty instance. + * The receive_buf method is single threaded for each tty instance. + * + * Locking: takes buffer lock to ensure single-threaded flip buffer + * 'consumer' */ static void flush_to_ldisc(struct work_struct *work) { - struct tty_struct *tty = - container_of(work, struct tty_struct, buf.work.work); - unsigned long flags; + struct tty_port *port = container_of(work, struct tty_port, buf.work); + struct tty_bufhead *buf = &port->buf; + struct tty_struct *tty; struct tty_ldisc *disc; + tty = port->itty; + if (tty == NULL) + return; + disc = tty_ldisc_ref(tty); - if (disc == NULL) /* !TTY_LDISC */ + if (disc == NULL) return; - spin_lock_irqsave(&tty->buf.lock, flags); - - if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { - struct tty_buffer *head, *tail = tty->buf.tail; - int seen_tail = 0; - while ((head = tty->buf.head) != NULL) { - int count; - char *char_buf; - unsigned char *flag_buf; - - count = head->commit - head->read; - if (!count) { - if (head->next == NULL) - break; - /* - There's a possibility tty might get new buffer - added during the unlock window below. We could - end up spinning in here forever hogging the CPU - completely. To avoid this let's have a rest each - time we processed the tail buffer. 
- */ - if (tail == head) - seen_tail = 1; - tty->buf.head = head->next; - tty_buffer_free(tty, head); - continue; - } - /* Ldisc or user is trying to flush the buffers - we are feeding to the ldisc, stop feeding the - line discipline as we want to empty the queue */ - if (test_bit(TTY_FLUSHPENDING, &tty->flags)) - break; - if (!tty->receive_room || seen_tail) { - schedule_delayed_work(&tty->buf.work, 1); + mutex_lock(&buf->lock); + + while (1) { + struct tty_buffer *head = buf->head; + int count; + + /* Ldisc or user is trying to gain exclusive access */ + if (atomic_read(&buf->priority)) + break; + + count = head->commit - head->read; + if (!count) { + if (head->next == NULL) break; - } - if (count > tty->receive_room) - count = tty->receive_room; - char_buf = head->char_buf_ptr + head->read; - flag_buf = head->flag_buf_ptr + head->read; - head->read += count; - spin_unlock_irqrestore(&tty->buf.lock, flags); - disc->ops->receive_buf(tty, char_buf, - flag_buf, count); - spin_lock_irqsave(&tty->buf.lock, flags); + buf->head = head->next; + tty_buffer_free(port, head); + continue; } - clear_bit(TTY_FLUSHING, &tty->flags); - } - /* We may have a deferred request to flush the input buffer, - if so pull the chain under the lock and empty the queue */ - if (test_bit(TTY_FLUSHPENDING, &tty->flags)) { - __tty_buffer_flush(tty); - clear_bit(TTY_FLUSHPENDING, &tty->flags); - wake_up(&tty->read_wait); + count = receive_buf(tty, head, count); + if (!count) + break; } - spin_unlock_irqrestore(&tty->buf.lock, flags); + + mutex_unlock(&buf->lock); tty_ldisc_deref(disc); } @@ -481,34 +487,32 @@ static void flush_to_ldisc(struct work_struct *work) */ void tty_flush_to_ldisc(struct tty_struct *tty) { - flush_delayed_work(&tty->buf.work); + if (!tty->port->low_latency) + flush_work(&tty->port->buf.work); } /** * tty_flip_buffer_push - terminal - * @tty: tty to push + * @port: tty port to push * * Queue a push of the terminal flip buffers to the line discipline. This - * function must not be called from IRQ context if tty->low_latency is set. + * function must not be called from IRQ context if port->low_latency is + * set. * * In the event of the queue being busy for flipping the work will be * held off and retried later. - * - * Locking: tty buffer lock. Driver locks in low latency mode. */ -void tty_flip_buffer_push(struct tty_struct *tty) +void tty_flip_buffer_push(struct tty_port *port) { - unsigned long flags; - spin_lock_irqsave(&tty->buf.lock, flags); - if (tty->buf.tail != NULL) - tty->buf.tail->commit = tty->buf.tail->used; - spin_unlock_irqrestore(&tty->buf.lock, flags); - - if (tty->low_latency) - flush_to_ldisc(&tty->buf.work.work); + struct tty_bufhead *buf = &port->buf; + + buf->tail->commit = buf->tail->used; + + if (port->low_latency) + flush_to_ldisc(&buf->work); else - schedule_delayed_work(&tty->buf.work, 1); + schedule_work(&buf->work); } EXPORT_SYMBOL(tty_flip_buffer_push); @@ -518,17 +522,18 @@ EXPORT_SYMBOL(tty_flip_buffer_push); * * Set up the initial state of the buffer management for a tty device. * Must be called before the other tty buffer functions are used. 
- *
- * Locking: none
  */
 
-void tty_buffer_init(struct tty_struct *tty)
+void tty_buffer_init(struct tty_port *port)
 {
-	spin_lock_init(&tty->buf.lock);
-	tty->buf.head = NULL;
-	tty->buf.tail = NULL;
-	tty->buf.free = NULL;
-	tty->buf.memory_used = 0;
-	INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
+	struct tty_bufhead *buf = &port->buf;
+
+	mutex_init(&buf->lock);
+	tty_buffer_reset(&buf->sentinel, 0);
+	buf->head = &buf->sentinel;
+	buf->tail = &buf->sentinel;
+	init_llist_head(&buf->free);
+	atomic_set(&buf->memory_used, 0);
+	atomic_set(&buf->priority, 0);
+	INIT_WORK(&buf->work, flush_to_ldisc);
 }
-
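
The hunks above move the producer-side helpers from struct tty_struct to struct tty_port. Below is a minimal sketch of how a driver receive path is expected to drive the reworked API; the uart_demo_rx_irq() helper, its arguments and the back-pressure policy are invented for illustration, while the tty_* calls use the signatures introduced in this diff.

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Hypothetical RX path: 'data'/'len' stand in for bytes read from hardware. */
static void uart_demo_rx_irq(struct tty_port *port,
			     const unsigned char *data, size_t len)
{
	int space = tty_buffer_space_avail(port);

	/* Optional back-pressure: bytes beyond the TTYB_MEM_LIMIT-based cap
	 * would be dropped by the insert helpers anyway. */
	if (len > (size_t)space)
		len = space;

	/* Queue the bytes, flagging every character TTY_NORMAL. */
	len = tty_insert_flip_string_fixed_flag(port, data, TTY_NORMAL, len);

	/* Commit what was queued and kick the flush_to_ldisc() worker
	 * (must not be called from IRQ context if port->low_latency is set). */
	tty_flip_buffer_push(port);
}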
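
For drivers that copy a block straight into the flip buffer, tty_prepare_flip_string() (converted above to take a tty_port) reserves linear space that is already flagged TTY_NORMAL. A hedged sketch, with dma_demo_complete() and its parameters invented for the example:

#include <linux/string.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static void dma_demo_complete(struct tty_port *port,
			      const unsigned char *src, size_t len)
{
	unsigned char *dst;
	int space = tty_prepare_flip_string(port, &dst, len);

	if (space <= 0)
		return;		/* buffer limit reached, data is dropped */

	/* Plain copy; as the kernel-doc above notes, the flip buffer is
	 * not guaranteed to be a DMA target. */
	memcpy(dst, src, space);
	tty_flip_buffer_push(port);
}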
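
tty_buffer_lock_exclusive()/tty_buffer_unlock_exclusive(), added near the top of this diff, let a consumer other than the buffer work item call into the line discipline safely. The following is only a rough sketch of that bracketing, loosely modelled on a paste-style caller; a real user would also honour tty->receive_room and loop as needed.

#include <linux/tty.h>

static void demo_feed_ldisc(struct tty_struct *tty,
			    const unsigned char *buf, int count)
{
	struct tty_ldisc *ld = tty_ldisc_ref_wait(tty);

	/* Exclude the buffer work and any pending flush so the ldisc's
	 * receive path stays single threaded while we call it directly. */
	tty_buffer_lock_exclusive(tty->port);
	ld->ops->receive_buf(tty, buf, NULL, count);
	tty_buffer_unlock_exclusive(tty->port);

	tty_ldisc_deref(ld);
}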