
ALSA: seq: More protection for concurrent write and ioctl races

commit 7bd8009156 upstream.

This patch is an attempt at further hardening against races between
concurrent write and ioctl calls.  The previous fix, commit d15d662e89
("ALSA: seq: Fix racy pool initializations"), covered the race between
the pool initialization in the writer and the pool resize ioctl via
client->ioctl_mutex (CVE-2018-1000004).  However, this mutex really
needs to be applied more widely, to the whole write operation, in
order to avoid unexpected pool operations by another thread.
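
As a rough sketch of the widened lock scope (condensed from the
snd_seq_write() hunks below; error handling trimmed for brevity):

	/* before: the mutex guarded only the pool initialization */
	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
		mutex_lock(&client->ioctl_mutex);
		err = snd_seq_pool_init(client->pool);
		mutex_unlock(&client->ioctl_mutex);
	}
	/* ... events parsed and enqueued with no mutex held ... */

	/* after: the mutex covers the whole write operation */
	mutex_lock(&client->ioctl_mutex);
	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client))
		err = snd_seq_pool_init(client->pool);
	/* ... events parsed and enqueued under the mutex ... */
 out:
	mutex_unlock(&client->ioctl_mutex);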

The only change outside snd_seq_write() is the additional mutex
argument to the helper functions, so that the given mutex can be
unlocked / relocked temporarily around the schedule() call for a
blocking write.
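
Concretely, the blocking path in the cell allocator then behaves
roughly as follows (condensed from the snd_seq_cell_alloc() hunk
below; mutexp is NULL for callers that hold no such mutex, e.g. the
FIFO and kernel-client paths):

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&pool->output_sleep, &wait);
	spin_unlock_irq(&pool->lock);
	if (mutexp)
		mutex_unlock(mutexp);	/* let ioctls run while we sleep */
	schedule();
	if (mutexp)
		mutex_lock(mutexp);	/* re-acquire before touching the pool */
	spin_lock_irq(&pool->lock);
	remove_wait_queue(&pool->output_sleep, &wait);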

Fixes: d15d662e89 ("ALSA: seq: Fix racy pool initializations")
Reported-by: 范龙飞 <long7573@126.com>
Reported-by: Nicolai Stange <nstange@suse.de>
Reviewed-and-tested-by: Nicolai Stange <nstange@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Takashi Iwai, 2018-03-05 22:06:09 +01:00 (committed by Greg Kroah-Hartman)
parent d9c724729d
commit 6d3a7dcba8
4 changed files with 24 additions and 13 deletions

sound/core/seq/seq_clientmgr.c

@@ -906,7 +906,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
 static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
 					struct snd_seq_event *event,
 					struct file *file, int blocking,
-					int atomic, int hop)
+					int atomic, int hop,
+					struct mutex *mutexp)
 {
 	struct snd_seq_event_cell *cell;
 	int err;
@@ -944,7 +945,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
 		return -ENXIO; /* queue is not allocated */
 
 	/* allocate an event cell */
-	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
+	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
+				file, mutexp);
 	if (err < 0)
 		return err;
@@ -1013,12 +1015,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 		return -ENXIO;
 
 	/* allocate the pool now if the pool is not allocated yet */
+	mutex_lock(&client->ioctl_mutex);
 	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
-		mutex_lock(&client->ioctl_mutex);
 		err = snd_seq_pool_init(client->pool);
-		mutex_unlock(&client->ioctl_mutex);
 		if (err < 0)
-			return -ENOMEM;
+			goto out;
 	}
 
 	/* only process whole events */
@@ -1069,7 +1070,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 		/* ok, enqueue it */
 		err = snd_seq_client_enqueue_event(client, &event, file,
 						   !(file->f_flags & O_NONBLOCK),
-						   0, 0);
+						   0, 0, &client->ioctl_mutex);
 		if (err < 0)
 			break;
@@ -1080,6 +1081,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 		written += len;
 	}
 
+ out:
+	mutex_unlock(&client->ioctl_mutex);
 	return written ? written : err;
 }
@@ -2259,7 +2262,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
 	if (! cptr->accept_output)
 		result = -EPERM;
 	else /* send it */
-		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
+		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
+						      atomic, hop, NULL);
 
 	snd_seq_client_unlock(cptr);
 	return result;

sound/core/seq/seq_fifo.c

@@ -125,7 +125,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
 		return -EINVAL;
 
 	snd_use_lock_use(&f->use_lock);
-	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
+	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
 	if (err < 0) {
 		if ((err == -ENOMEM) || (err == -EAGAIN))
 			atomic_inc(&f->overflow);

sound/core/seq/seq_memory.c

@@ -220,7 +220,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
  */
 static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
 			      struct snd_seq_event_cell **cellp,
-			      int nonblock, struct file *file)
+			      int nonblock, struct file *file,
+			      struct mutex *mutexp)
 {
 	struct snd_seq_event_cell *cell;
 	unsigned long flags;
@@ -244,7 +245,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
 		set_current_state(TASK_INTERRUPTIBLE);
 		add_wait_queue(&pool->output_sleep, &wait);
 		spin_unlock_irq(&pool->lock);
+		if (mutexp)
+			mutex_unlock(mutexp);
 		schedule();
+		if (mutexp)
+			mutex_lock(mutexp);
 		spin_lock_irq(&pool->lock);
 		remove_wait_queue(&pool->output_sleep, &wait);
 		/* interrupted? */
@@ -287,7 +292,7 @@ __error:
  */
 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
 		      struct snd_seq_event_cell **cellp, int nonblock,
-		      struct file *file)
+		      struct file *file, struct mutex *mutexp)
 {
 	int ncells, err;
 	unsigned int extlen;
@@ -304,7 +309,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
 	if (ncells >= pool->total_elements)
 		return -ENOMEM;
 
-	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
+	err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
 	if (err < 0)
 		return err;
@@ -330,7 +335,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
 			int size = sizeof(struct snd_seq_event);
 			if (len < size)
 				size = len;
-			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
+			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
+						 mutexp);
 			if (err < 0)
 				goto __error;
 			if (cell->event.data.ext.ptr == NULL)

sound/core/seq/seq_memory.h

@@ -66,7 +66,8 @@ struct snd_seq_pool {
 void snd_seq_cell_free(struct snd_seq_event_cell *cell);
 
 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
-		      struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
+		      struct snd_seq_event_cell **cellp, int nonblock,
+		      struct file *file, struct mutex *mutexp);
 
 /* return number of unused (free) cells */
 static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)