about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Takashi Iwai, 2018-02-12 08:20:51 -0600
committer: Greg Kroah-Hartman, 2018-02-22 08:45:01 -0600
commitb374197df2deb08fec55d48763711ea1df8efde7 (patch)
tree2dab03ecf05dcfa3e806412e7527a4bb54b04724
parent5e5d1372ba7cfa0cf040a4e038e689f6f16e6470 (diff)
downloadkernel-omap-b374197df2deb08fec55d48763711ea1df8efde7.tar.gz
kernel-omap-b374197df2deb08fec55d48763711ea1df8efde7.tar.xz
kernel-omap-b374197df2deb08fec55d48763711ea1df8efde7.zip
ALSA: seq: Fix racy pool initializations
commit d15d662e89fc667b90cd294b0eb45694e33144da upstream. ALSA sequencer core initializes the event pool on demand by invoking snd_seq_pool_init() when the first write happens and the pool is empty. Meanwhile user can reset the pool size manually via ioctl concurrently, and this may lead to UAF or out-of-bound accesses since the function tries to vmalloc / vfree the buffer. A simple fix is to just wrap the snd_seq_pool_init() call with the recently introduced client->ioctl_mutex; as the calls for snd_seq_pool_init() from other side are always protected with this mutex, we can avoid the race. Reported-by: 范龙飞 <long7573@126.com> Cc: <stable@vger.kernel.org> Signed-off-by: Takashi Iwai <tiwai@suse.de> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--sound/core/seq/seq_clientmgr.c8
1 file changed, 6 insertions, 2 deletions
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index dacc62fe5a58..167b943469ab 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1012,7 +1012,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1012{ 1012{
1013 struct snd_seq_client *client = file->private_data; 1013 struct snd_seq_client *client = file->private_data;
1014 int written = 0, len; 1014 int written = 0, len;
1015 int err = -EINVAL; 1015 int err;
1016 struct snd_seq_event event; 1016 struct snd_seq_event event;
1017 1017
1018 if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT)) 1018 if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1027,11 +1027,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1027 1027
1028 /* allocate the pool now if the pool is not allocated yet */ 1028 /* allocate the pool now if the pool is not allocated yet */
1029 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { 1029 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
1030 if (snd_seq_pool_init(client->pool) < 0) 1030 mutex_lock(&client->ioctl_mutex);
1031 err = snd_seq_pool_init(client->pool);
1032 mutex_unlock(&client->ioctl_mutex);
1033 if (err < 0)
1031 return -ENOMEM; 1034 return -ENOMEM;
1032 } 1035 }
1033 1036
1034 /* only process whole events */ 1037 /* only process whole events */
1038 err = -EINVAL;
1035 while (count >= sizeof(struct snd_seq_event)) { 1039 while (count >= sizeof(struct snd_seq_event)) {
1036 /* Read in the event header from the user */ 1040 /* Read in the event header from the user */
1037 len = sizeof(event); 1041 len = sizeof(event);