forked from Minki/linux
[PATCH] new tty buffering locking fix
Change locking in the new tty buffering facility away from tty->read_lock, which is currently ignored by drivers and is thus ineffective. The new locking uses a tty-buffering-specific lock, enforced centrally in the tty buffering code. Two drivers (esp and cyclades) are updated to use the tty buffering functions instead of accessing tty buffering internals directly; this is required for the new locking to work. Minor checks for NULL buffers are added to tty_prepare_flip_string/tty_prepare_flip_string_flags. Signed-off-by: Paul Fulghum <paulkf@microgate.com> Cc: Alan Cox <alan@lxorguk.ukuu.org.uk> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
546cfdf47f
commit
808249ceba
@ -1233,7 +1233,7 @@ cyy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
|
||||
}
|
||||
info->idle_stats.recv_idle = jiffies;
|
||||
}
|
||||
schedule_delayed_work(&tty->buf.work, 1);
|
||||
tty_schedule_flip(tty);
|
||||
}
|
||||
/* end of service */
|
||||
cy_writeb(base_addr+(CyRIR<<index), (save_xir & 0x3f));
|
||||
@ -1606,7 +1606,7 @@ cyz_handle_rx(struct cyclades_port *info,
|
||||
}
|
||||
#endif
|
||||
info->idle_stats.recv_idle = jiffies;
|
||||
schedule_delayed_work(&tty->buf.work, 1);
|
||||
tty_schedule_flip(tty);
|
||||
}
|
||||
/* Update rx_get */
|
||||
cy_writel(&buf_ctrl->rx_get, new_rx_get);
|
||||
@ -1809,7 +1809,7 @@ cyz_handle_cmd(struct cyclades_card *cinfo)
|
||||
if(delta_count)
|
||||
cy_sched_event(info, Cy_EVENT_DELTA_WAKEUP);
|
||||
if(special_count)
|
||||
schedule_delayed_work(&tty->buf.work, 1);
|
||||
tty_schedule_flip(tty);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -359,7 +359,7 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes)
|
||||
}
|
||||
}
|
||||
|
||||
schedule_delayed_work(&tty->buf.work, 1);
|
||||
tty_schedule_flip(tty);
|
||||
|
||||
info->stat_flags &= ~ESP_STAT_RX_TIMEOUT;
|
||||
release_pio_buffer(pio_buf);
|
||||
@ -426,7 +426,7 @@ static inline void receive_chars_dma_done(struct esp_struct *info,
|
||||
}
|
||||
tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], statflag);
|
||||
}
|
||||
schedule_delayed_work(&tty->buf.work, 1);
|
||||
tty_schedule_flip(tty);
|
||||
}
|
||||
|
||||
if (dma_bytes != num_bytes) {
|
||||
|
@ -253,6 +253,7 @@ static void tty_buffer_free_all(struct tty_struct *tty)
|
||||
|
||||
static void tty_buffer_init(struct tty_struct *tty)
|
||||
{
|
||||
spin_lock_init(&tty->buf.lock);
|
||||
tty->buf.head = NULL;
|
||||
tty->buf.tail = NULL;
|
||||
tty->buf.free = NULL;
|
||||
@ -266,6 +267,7 @@ static struct tty_buffer *tty_buffer_alloc(size_t size)
|
||||
p->used = 0;
|
||||
p->size = size;
|
||||
p->next = NULL;
|
||||
p->active = 0;
|
||||
p->char_buf_ptr = (char *)(p->data);
|
||||
p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
|
||||
/* printk("Flip create %p\n", p); */
|
||||
@ -312,25 +314,36 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
|
||||
|
||||
int tty_buffer_request_room(struct tty_struct *tty, size_t size)
|
||||
{
|
||||
struct tty_buffer *b = tty->buf.tail, *n;
|
||||
int left = 0;
|
||||
struct tty_buffer *b, *n;
|
||||
int left;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&tty->buf.lock, flags);
|
||||
|
||||
/* OPTIMISATION: We could keep a per tty "zero" sized buffer to
|
||||
remove this conditional if its worth it. This would be invisible
|
||||
to the callers */
|
||||
if(b != NULL)
|
||||
if ((b = tty->buf.tail) != NULL) {
|
||||
left = b->size - b->used;
|
||||
if(left >= size)
|
||||
return size;
|
||||
/* This is the slow path - looking for new buffers to use */
|
||||
n = tty_buffer_find(tty, size);
|
||||
if(n == NULL)
|
||||
return left;
|
||||
if(b != NULL)
|
||||
b->next = n;
|
||||
else
|
||||
tty->buf.head = n;
|
||||
tty->buf.tail = n;
|
||||
b->active = 1;
|
||||
} else
|
||||
left = 0;
|
||||
|
||||
if (left < size) {
|
||||
/* This is the slow path - looking for new buffers to use */
|
||||
if ((n = tty_buffer_find(tty, size)) != NULL) {
|
||||
if (b != NULL) {
|
||||
b->next = n;
|
||||
b->active = 0;
|
||||
} else
|
||||
tty->buf.head = n;
|
||||
tty->buf.tail = n;
|
||||
n->active = 1;
|
||||
} else
|
||||
size = left;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&tty->buf.lock, flags);
|
||||
return size;
|
||||
}
|
||||
|
||||
@ -396,10 +409,12 @@ EXPORT_SYMBOL_GPL(tty_insert_flip_string_flags);
|
||||
int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size)
|
||||
{
|
||||
int space = tty_buffer_request_room(tty, size);
|
||||
struct tty_buffer *tb = tty->buf.tail;
|
||||
*chars = tb->char_buf_ptr + tb->used;
|
||||
memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
|
||||
tb->used += space;
|
||||
if (likely(space)) {
|
||||
struct tty_buffer *tb = tty->buf.tail;
|
||||
*chars = tb->char_buf_ptr + tb->used;
|
||||
memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
|
||||
tb->used += space;
|
||||
}
|
||||
return space;
|
||||
}
|
||||
|
||||
@ -416,10 +431,12 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
|
||||
int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size)
|
||||
{
|
||||
int space = tty_buffer_request_room(tty, size);
|
||||
struct tty_buffer *tb = tty->buf.tail;
|
||||
*chars = tb->char_buf_ptr + tb->used;
|
||||
*flags = tb->flag_buf_ptr + tb->used;
|
||||
tb->used += space;
|
||||
if (likely(space)) {
|
||||
struct tty_buffer *tb = tty->buf.tail;
|
||||
*chars = tb->char_buf_ptr + tb->used;
|
||||
*flags = tb->flag_buf_ptr + tb->used;
|
||||
tb->used += space;
|
||||
}
|
||||
return space;
|
||||
}
|
||||
|
||||
@ -2747,20 +2764,20 @@ static void flush_to_ldisc(void *private_)
|
||||
schedule_delayed_work(&tty->buf.work, 1);
|
||||
goto out;
|
||||
}
|
||||
spin_lock_irqsave(&tty->read_lock, flags);
|
||||
while((tbuf = tty->buf.head) != NULL) {
|
||||
spin_lock_irqsave(&tty->buf.lock, flags);
|
||||
while((tbuf = tty->buf.head) != NULL && !tbuf->active) {
|
||||
tty->buf.head = tbuf->next;
|
||||
if (tty->buf.head == NULL)
|
||||
tty->buf.tail = NULL;
|
||||
spin_unlock_irqrestore(&tty->read_lock, flags);
|
||||
spin_unlock_irqrestore(&tty->buf.lock, flags);
|
||||
/* printk("Process buffer %p for %d\n", tbuf, tbuf->used); */
|
||||
disc->receive_buf(tty, tbuf->char_buf_ptr,
|
||||
tbuf->flag_buf_ptr,
|
||||
tbuf->used);
|
||||
spin_lock_irqsave(&tty->read_lock, flags);
|
||||
spin_lock_irqsave(&tty->buf.lock, flags);
|
||||
tty_buffer_free(tty, tbuf);
|
||||
}
|
||||
spin_unlock_irqrestore(&tty->read_lock, flags);
|
||||
spin_unlock_irqrestore(&tty->buf.lock, flags);
|
||||
out:
|
||||
tty_ldisc_deref(disc);
|
||||
}
|
||||
@ -2852,6 +2869,12 @@ EXPORT_SYMBOL(tty_get_baud_rate);
|
||||
|
||||
void tty_flip_buffer_push(struct tty_struct *tty)
|
||||
{
|
||||
unsigned long flags;
|
||||
spin_lock_irqsave(&tty->buf.lock, flags);
|
||||
if (tty->buf.tail != NULL)
|
||||
tty->buf.tail->active = 0;
|
||||
spin_unlock_irqrestore(&tty->buf.lock, flags);
|
||||
|
||||
if (tty->low_latency)
|
||||
flush_to_ldisc((void *) tty);
|
||||
else
|
||||
|
@ -151,6 +151,11 @@ extern unsigned int keymap_count;
|
||||
|
||||
/*
 * con_schedule_flip - push queued console input to the line discipline
 * @t: tty whose flip buffer should be flushed
 *
 * Marks the current tail buffer inactive under the tty-buffering lock
 * (t->buf.lock) so the flush worker is permitted to consume it, then
 * schedules the buffer work item.  The lock/mark/unlock must happen
 * before scheduling the work, otherwise the worker could observe the
 * tail still active and skip it.
 */
static inline void con_schedule_flip(struct tty_struct *t)
{
	unsigned long flags;
	spin_lock_irqsave(&t->buf.lock, flags);
	if (t->buf.tail != NULL)
		t->buf.tail->active = 0;
	spin_unlock_irqrestore(&t->buf.lock, flags);
	schedule_work(&t->buf.work);
}
|
||||
|
||||
|
@ -57,6 +57,7 @@ struct tty_buffer {
|
||||
unsigned char *flag_buf_ptr;
|
||||
int used;
|
||||
int size;
|
||||
int active;
|
||||
/* Data points here */
|
||||
unsigned long data[0];
|
||||
};
|
||||
@ -64,6 +65,7 @@ struct tty_buffer {
|
||||
struct tty_bufhead {
|
||||
struct work_struct work;
|
||||
struct semaphore pty_sem;
|
||||
spinlock_t lock;
|
||||
struct tty_buffer *head; /* Queue head */
|
||||
struct tty_buffer *tail; /* Active buffer */
|
||||
struct tty_buffer *free; /* Free queue head */
|
||||
|
@ -17,7 +17,7 @@ _INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
|
||||
unsigned char ch, char flag)
|
||||
{
|
||||
struct tty_buffer *tb = tty->buf.tail;
|
||||
if (tb && tb->used < tb->size) {
|
||||
if (tb && tb->active && tb->used < tb->size) {
|
||||
tb->flag_buf_ptr[tb->used] = flag;
|
||||
tb->char_buf_ptr[tb->used++] = ch;
|
||||
return 1;
|
||||
@ -27,6 +27,11 @@ _INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
|
||||
|
||||
/*
 * tty_schedule_flip - schedule deferred processing of queued tty input
 * @tty: tty whose flip buffer should be flushed
 *
 * Marks the current tail buffer inactive under the tty-buffering lock
 * (tty->buf.lock) so the flush worker may consume it, then schedules
 * the buffer work with a 1-jiffy delay.  Replaces drivers' direct
 * schedule_delayed_work(&tty->buf.work, 1) calls, which bypassed the
 * central buffer locking.
 */
_INLINE_ void tty_schedule_flip(struct tty_struct *tty)
{
	unsigned long flags;
	spin_lock_irqsave(&tty->buf.lock, flags);
	if (tty->buf.tail != NULL)
		tty->buf.tail->active = 0;
	spin_unlock_irqrestore(&tty->buf.lock, flags);
	schedule_delayed_work(&tty->buf.work, 1);
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user