
fsnotify: Allocate overflow events with proper type

Commit 7053aee26a "fsnotify: do not share events between notification
groups" used an overflow event statically allocated in the group with
the size of the generic notification event. This causes problems
because some code looks at type-specific parts of the event structure
and gets confused by the random data it sees there, which leads to
crashes.

Fix the problem by allocating the overflow event with a type
corresponding to the group type so that code cannot get confused.

Signed-off-by: Jan Kara <jack@suse.cz>
Jan Kara 2014-02-21 19:14:11 +01:00
parent 482ef06c5e
commit ff57cd5863
5 changed files with 35 additions and 4 deletions
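
For readers who do not have the fsnotify internals in their head: every backend wraps the generic struct fsnotify_event as the first member of its own event structure and downcasts with a container_of()-style cast. The userspace sketch below (struct layouts, the fd field and the numeric mask value are simplified stand-ins, not the kernel definitions) shows the pattern and why handing backend code an event allocated only at the generic size makes that downcast read past the end of the object, which is the crash scenario the commit message describes.

/*
 * Userspace model of the fsnotify embedded-event pattern (simplified;
 * the real kernel structs have more fields and different values).
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fsnotify_event {                 /* generic part, always embedded first */
        unsigned int mask;
};

struct fanotify_event_info {            /* backend-specific wrapper */
        struct fsnotify_event fse;      /* must be the first member */
        int fd;                         /* type-specific data follows */
};

/* Backend code only ever sees a struct fsnotify_event * and downcasts it. */
static int event_fd(struct fsnotify_event *fse)
{
        return container_of(fse, struct fanotify_event_info, fse)->fd;
}

int main(void)
{
        /*
         * The fix: allocate the *wrapper* type and hand out a pointer to its
         * embedded fse.  The pre-fix bug was allocating only
         * sizeof(struct fsnotify_event) for the overflow event, so a
         * downcast like event_fd() read memory past the end of the object.
         */
        struct fanotify_event_info *oevent = calloc(1, sizeof(*oevent));

        if (!oevent)
                return 1;
        oevent->fse.mask = 0x4000;      /* stand-in for FS_Q_OVERFLOW */
        oevent->fd = -1;

        printf("fd via downcast: %d\n", event_fd(&oevent->fse));
        free(oevent);
        return 0;
}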

fs/notify/fanotify/fanotify_user.c

@@ -698,6 +698,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
         struct fsnotify_group *group;
         int f_flags, fd;
         struct user_struct *user;
+        struct fanotify_event_info *oevent;
 
         pr_debug("%s: flags=%d event_f_flags=%d\n",
                  __func__, flags, event_f_flags);
@@ -730,8 +731,20 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
         group->fanotify_data.user = user;
         atomic_inc(&user->fanotify_listeners);
 
+        oevent = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
+        if (unlikely(!oevent)) {
+                fd = -ENOMEM;
+                goto out_destroy_group;
+        }
+        group->overflow_event = &oevent->fse;
+        fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
+        oevent->tgid = get_pid(task_tgid(current));
+        oevent->path.mnt = NULL;
+        oevent->path.dentry = NULL;
+
         group->fanotify_data.f_flags = event_f_flags;
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+        oevent->response = 0;
         mutex_init(&group->fanotify_data.access_mutex);
         init_waitqueue_head(&group->fanotify_data.access_waitq);
         INIT_LIST_HEAD(&group->fanotify_data.access_list);
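
Because the overflow event is pre-initialized above with a NULL path and the listener's tgid, it can later be copied to userspace like any other fanotify event; it arrives with mask FAN_Q_OVERFLOW and no file descriptor (FAN_NOFD). A minimal, hypothetical listener that detects the condition could look like this (error handling trimmed, requires CAP_SYS_ADMIN; the mark on "/" is only illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>
#include <unistd.h>

int main(void)
{
        char buf[4096] __attribute__((aligned(__alignof__(struct fanotify_event_metadata))));
        ssize_t len;
        int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

        if (fd < 0) {
                perror("fanotify_init");
                return 1;
        }
        /* Watch opens on the mount containing "/" (illustrative only). */
        if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
                          FAN_OPEN, AT_FDCWD, "/") < 0) {
                perror("fanotify_mark");
                return 1;
        }

        while ((len = read(fd, buf, sizeof(buf))) > 0) {
                struct fanotify_event_metadata *md = (void *)buf;

                for (; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
                        if (md->mask & FAN_Q_OVERFLOW) {
                                /* Overflow events carry no open fd (FAN_NOFD). */
                                printf("event queue overflow (fd=%d)\n", md->fd);
                                continue;
                        }
                        if (md->fd >= 0)
                                close(md->fd);  /* always close event fds */
                }
        }
        return 0;
}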

fs/notify/group.c

@@ -55,6 +55,13 @@ void fsnotify_destroy_group(struct fsnotify_group *group)
         /* clear the notification queue of all events */
         fsnotify_flush_notify(group);
 
+        /*
+         * Destroy overflow event (we cannot use fsnotify_destroy_event() as
+         * that deliberately ignores overflow events.
+         */
+        if (group->overflow_event)
+                group->ops->free_event(group->overflow_event);
+
         fsnotify_put_group(group);
 }
@@ -99,7 +106,6 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
         INIT_LIST_HEAD(&group->marks_list);
 
         group->ops = ops;
-        fsnotify_init_event(&group->overflow_event, NULL, FS_Q_OVERFLOW);
 
         return group;
 }
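
Why route the teardown through group->ops->free_event() instead of a plain kfree()? Only the backend that allocated the wrapper knows its real type and which allocator it came from. The following compilable userspace analog (all type and function names here are made up for illustration) mirrors that dispatch:

#include <stdlib.h>

struct fsnotify_event {                 /* generic part, embedded first */
        unsigned int mask;
};

struct backend_event {                  /* stand-in for fanotify/inotify_event_info */
        struct fsnotify_event fse;
        char payload[64];               /* type-specific data */
};

struct ops {
        void (*free_event)(struct fsnotify_event *fse);
};

struct group {
        const struct ops *ops;
        struct fsnotify_event *overflow_event;
};

static void backend_free_event(struct fsnotify_event *fse)
{
        /* fse is the first member, so the wrapper starts at the same address. */
        free((struct backend_event *)fse);
}

static const struct ops backend_ops = { .free_event = backend_free_event };

static void destroy_group(struct group *g)
{
        /* Mirrors fsnotify_destroy_group(): the generic layer never frees
         * the overflow event itself, it asks the backend to do it. */
        if (g->overflow_event)
                g->ops->free_event(g->overflow_event);
}

int main(void)
{
        struct backend_event *oevent = calloc(1, sizeof(*oevent));
        struct group g = {
                .ops = &backend_ops,
                .overflow_event = oevent ? &oevent->fse : NULL,
        };

        destroy_group(&g);
        return 0;
}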

fs/notify/inotify/inotify_user.c

@@ -633,11 +633,23 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
 static struct fsnotify_group *inotify_new_group(unsigned int max_events)
 {
         struct fsnotify_group *group;
+        struct inotify_event_info *oevent;
 
         group = fsnotify_alloc_group(&inotify_fsnotify_ops);
         if (IS_ERR(group))
                 return group;
 
+        oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
+        if (unlikely(!oevent)) {
+                fsnotify_destroy_group(group);
+                return ERR_PTR(-ENOMEM);
+        }
+        group->overflow_event = &oevent->fse;
+        fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
+        oevent->wd = -1;
+        oevent->sync_cookie = 0;
+        oevent->name_len = 0;
+
         group->max_events = max_events;
 
         spin_lock_init(&group->inotify_data.idr_lock);
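
The wd = -1 / name_len = 0 initialization matters because the overflow event is later copied to userspace like a normal inotify event: readers see mask IN_Q_OVERFLOW with wd == -1 and no name. A hypothetical reader loop that detects it:

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
        char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
        ssize_t len;
        int fd = inotify_init();

        if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0) {
                perror("inotify");
                return 1;
        }

        while ((len = read(fd, buf, sizeof(buf))) > 0) {
                char *p = buf;

                while (p < buf + len) {
                        struct inotify_event *ev = (struct inotify_event *)p;

                        if (ev->mask & IN_Q_OVERFLOW)
                                printf("overflow: wd=%d, some events were dropped\n",
                                       ev->wd);
                        p += sizeof(*ev) + ev->len;
                }
        }
        return 0;
}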

fs/notify/notification.c

@@ -98,11 +98,11 @@ int fsnotify_add_notify_event(struct fsnotify_group *group,
         if (group->q_len >= group->max_events) {
                 ret = 2;
                 /* Queue overflow event only if it isn't already queued */
-                if (!list_empty(&group->overflow_event.list)) {
+                if (!list_empty(&group->overflow_event->list)) {
                         mutex_unlock(&group->notification_mutex);
                         return ret;
                 }
-                event = &group->overflow_event;
+                event = group->overflow_event;
                 goto queue;
         }

include/linux/fsnotify_backend.h

@@ -160,7 +160,7 @@ struct fsnotify_group {
         struct fasync_struct *fsn_fa;           /* async notification */
 
-        struct fsnotify_event overflow_event;   /* Event we queue when the
+        struct fsnotify_event *overflow_event;  /* Event we queue when the
                                                  * notification list is too
                                                  * full */