alistair23-linux/sound/core/seq/seq_memory.c
Takashi Iwai d99a36f472 ALSA: seq: Fix leak of pool buffer at concurrent writes
When multiple concurrent writes happen on the ALSA sequencer device
right after it is opened, each write may try to allocate the vmalloc
buffer for the pool, and some of these buffers get leaked.  This is
because the presence check and the assignment of the buffer are done
outside the spinlock for the pool.

The fix is to move both the check and the assignment under the spinlock.

(The current implementation is suboptimal, as there can be multiple
 unnecessary vmallocs because the allocation is done before the check
 under the spinlock.  But the pool size is already checked beforehand,
 so this isn't a big problem; that is, the only possible path is
 multiple writes before any pool assignment, and in practice the
 current coverage should be "good enough".)

The issue was triggered by syzkaller fuzzer.
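
A minimal sketch of the fixed ordering, as it now appears in
snd_seq_pool_init() below (only the check and the assignment are
serialized; the vmalloc itself stays outside the lock):

    cellptr = vmalloc(...);          /* may race with another writer */
    spin_lock_irqsave(&pool->lock, flags);
    if (pool->ptr) {                 /* someone else won the race */
        spin_unlock_irqrestore(&pool->lock, flags);
        vfree(cellptr);              /* drop our duplicate buffer */
        return 0;
    }
    pool->ptr = cellptr;
    ...
    spin_unlock_irqrestore(&pool->lock, flags);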

BugLink: http://lkml.kernel.org/r/CACT4Y+bSzazpXNvtAr=WXaL8hptqjHwqEyFA+VN2AWEx=aurkg@mail.gmail.com
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Tested-by: Dmitry Vyukov <dvyukov@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
2016-02-15 16:26:52 +01:00

/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}
/*
 * Variable length event:
 * The event like sysex uses variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed to several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */
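
/*
 * Illustrative example (editor's sketch, not from the original source):
 * a 100-byte sysex payload passes through the three formats above.  The
 * field names and flags are real; the concrete length is only for
 * illustration.
 *
 *   kernel space:  ext.data.len = 100;
 *                  ext.data.ptr = kbuf;   (kernel buffer, hypothetical)
 *   user space:    ext.data.len = 100 | SNDRV_SEQ_EXT_USRPTR;
 *                  ext.data.ptr = ubuf;   (__user pointer, hypothetical)
 *   chained cells: ext.data.len = 100 | SNDRV_SEQ_EXT_CHAINED;
 *                  ext.data.ptr = first extra cell; the 100 bytes span
 *                  DIV_ROUND_UP(100, sizeof(struct snd_seq_event)) cells,
 *                  matching the ncells computation in snd_seq_event_dup().
 */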
/*
 * exported:
 * call dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __force __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr, len);

	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}
EXPORT_SYMBOL(snd_seq_dump_var_event);
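
/*
 * Usage sketch (editor's illustration; count_bytes is a hypothetical
 * callback, not part of this file): the dump function is invoked once
 * per contiguous chunk, so a caller can stream a variable-length event
 * without first linearizing it.
 *
 *	static int count_bytes(void *priv, void *buf, int size)
 *	{
 *		*(int *)priv += size;
 *		return 0;
 *	}
 *
 *	int total = 0;
 *	err = snd_seq_dump_var_event(ev, count_bytes, &total);
 */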
/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);
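
/*
 * Usage sketch (editor's illustration; ev, kbuf and len are hypothetical
 * caller variables): expanding into a kernel-space buffer with no size
 * alignment.  On success the return value is the expanded length; on
 * failure it is -EINVAL, -EFAULT, or -EAGAIN (destination too small).
 *
 *	n = snd_seq_expand_var_event(ev, len, kbuf, 1, 0);
 *	if (n < 0)
 *		return n;
 */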
/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	if (snd_BUG_ON(!cell))
		return;
	pool = cell->pool;
	if (snd_BUG_ON(!pool))
		return;

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		pr_debug("ALSA: seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irq(&pool->lock);
		schedule();
		spin_lock_irq(&pool->lock);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}
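
/*
 * Note (editor's summary of the function above): in nonblock mode the
 * wait loop is skipped entirely, so an empty free list falls through
 * with cell == NULL and the initial err = -EAGAIN is returned.  Blocking
 * callers sleep on pool->output_sleep until snd_seq_cell_free() wakes
 * them or a signal arrives (-ERESTARTSYS).
 */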
/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed to additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}
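
/*
 * Note (editor's summary of the error path above): the head cell's
 * ext.len is marked SNDRV_SEQ_EXT_CHAINED and its ext.ptr is linked as
 * soon as the first extra cell is allocated, so on failure the single
 * snd_seq_cell_free(cell) call walks and releases the whole partial
 * chain.
 */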
/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}
/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
	if (!cellptr)
		return -ENOMEM;

	/* add new cells to the free cell list */
	spin_lock_irqsave(&pool->lock, flags);
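	/*
	 * A concurrent writer may have installed a buffer while we were
	 * in vmalloc(); the check and the assignment are done under the
	 * pool lock, so only the first buffer is kept and our duplicate
	 * is dropped (see the commit message above).
	 */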
	if (pool->ptr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		vfree(cellptr);
		return 0;
	}

	pool->ptr = cellptr;
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}
/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	unsigned long flags;
	struct snd_seq_event_cell *ptr;
	int max_count = 5 * HZ;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	/* wait for closing all threads */
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);

	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0) {
		if (max_count == 0) {
			pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
			break;
		}
		schedule_timeout_uninterruptible(1);
		max_count--;
	}

	/* release all resources */
	spin_lock_irqsave(&pool->lock, flags);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	vfree(ptr);

	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	return 0;
}
/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}
/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}
/* initialize sequencer memory */
int __init snd_sequencer_memory_init(void)
{
	return 0;
}

/* release sequencer memory */
void __exit snd_sequencer_memory_done(void)
{
}
/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures : %d\n", space, pool->event_alloc_failures);
}