fsnotify: fsnotify_add_notify_event should return an event

Rather than the horrific void ** argument and such just to pass the
fanotify_merge event back to the caller of fsnotify_add_notify_event(), have
those things return an event if it was different from the event suggested to
be added.

Signed-off-by: Eric Paris <eparis@redhat.com>
Eric Paris 2010-07-28 10:18:37 -04:00
parent 5ba08e2eeb
commit f70ab54cc6
5 changed files with 101 additions and 95 deletions
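
For illustration, a minimal sketch of the new calling convention (not part of
the patch; example_handle_event() and example_merge() are hypothetical names);
the flow mirrors the fanotify_handle_event() and inotify_handle_event() hunks
below:

#include <linux/err.h>
#include <linux/fsnotify_backend.h>

static struct fsnotify_event *example_merge(struct list_head *list,
					    struct fsnotify_event *event);

static int example_handle_event(struct fsnotify_group *group,
				struct fsnotify_event *event)
{
	struct fsnotify_event *added;

	added = fsnotify_add_notify_event(group, event, NULL, example_merge);
	if (IS_ERR(added))	/* e.g. ERR_PTR(-ENOMEM) from the holder allocation */
		return PTR_ERR(added);
	if (added) {
		/*
		 * The event was merged with (or replaced by) an existing or
		 * overflow event, which comes back with a reference held.
		 */
		fsnotify_put_event(added);
		return 0;
	}
	/* NULL means 'event' itself was queued; nothing extra to release */
	return 0;
}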


@@ -30,65 +30,58 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
return false;
}
/* Note, if we return an event in *arg that a reference is being held... */
static int fanotify_merge(struct list_head *list,
struct fsnotify_event *event,
void **arg)
/* and the list better be locked by something too! */
static struct fsnotify_event *fanotify_merge(struct list_head *list,
struct fsnotify_event *event)
{
struct fsnotify_event_holder *test_holder;
struct fsnotify_event *test_event;
struct fsnotify_event *test_event = NULL;
struct fsnotify_event *new_event;
struct fsnotify_event **return_event = (struct fsnotify_event **)arg;
int ret = 0;
pr_debug("%s: list=%p event=%p\n", __func__, list, event);
*return_event = NULL;
/* and the list better be locked by something too! */
list_for_each_entry_reverse(test_holder, list, event_list) {
test_event = test_holder->event;
if (should_merge(test_event, event)) {
fsnotify_get_event(test_event);
*return_event = test_event;
ret = -EEXIST;
/* if they are exactly the same we are done */
if (test_event->mask == event->mask)
goto out;
/*
* if the refcnt == 1 this is the only queue
* for this event and so we can update the mask
* in place.
*/
if (atomic_read(&test_event->refcnt) == 1) {
test_event->mask |= event->mask;
goto out;
}
/* can't allocate memory, merge was not possible */
new_event = fsnotify_clone_event(test_event);
if (unlikely(!new_event)) {
ret = 0;
goto out;
}
/* we didn't return the test_event, so drop that ref */
fsnotify_put_event(test_event);
/* the reference we return on new_event is from clone */
*return_event = new_event;
/* build new event and replace it on the list */
new_event->mask = (test_event->mask | event->mask);
fsnotify_replace_event(test_holder, new_event);
if (should_merge(test_holder->event, event)) {
test_event = test_holder->event;
break;
}
}
out:
return ret;
if (!test_event)
return NULL;
fsnotify_get_event(test_event);
/* if they are exactly the same we are done */
if (test_event->mask == event->mask)
return test_event;
/*
* if the refcnt == 2 this is the only queue
* for this event and so we can update the mask
* in place.
*/
if (atomic_read(&test_event->refcnt) == 2) {
test_event->mask |= event->mask;
return test_event;
}
new_event = fsnotify_clone_event(test_event);
/* done with test_event */
fsnotify_put_event(test_event);
/* couldn't allocate memory, merge was not possible */
if (unlikely(!new_event))
return ERR_PTR(-ENOMEM);
/* build new event and replace it on the list */
new_event->mask = (test_event->mask | event->mask);
fsnotify_replace_event(test_holder, new_event);
/* we hold a reference on new_event from clone_event */
return new_event;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
@@ -123,7 +116,7 @@ static int fanotify_get_response_from_access(struct fsnotify_group *group,
static int fanotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
int ret;
int ret = 0;
struct fsnotify_event *notify_event = NULL;
BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
@@ -138,13 +131,9 @@ static int fanotify_handle_event(struct fsnotify_group *group, struct fsnotify_e
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
ret = fsnotify_add_notify_event(group, event, NULL, fanotify_merge,
(void **)&notify_event);
/* -EEXIST means this event was merged with another, not that it was an error */
if (ret == -EEXIST)
ret = 0;
if (ret)
goto out;
notify_event = fsnotify_add_notify_event(group, event, NULL, fanotify_merge);
if (IS_ERR(notify_event))
return PTR_ERR(notify_event);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
if (event->mask & FAN_ALL_PERM_EVENTS) {
@@ -155,9 +144,9 @@ static int fanotify_handle_event(struct fsnotify_group *group, struct fsnotify_e
}
#endif
out:
if (notify_event)
fsnotify_put_event(notify_event);
return ret;
}


@@ -68,13 +68,11 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new
return false;
}
static int inotify_merge(struct list_head *list,
struct fsnotify_event *event,
void **arg)
static struct fsnotify_event *inotify_merge(struct list_head *list,
struct fsnotify_event *event)
{
struct fsnotify_event_holder *last_holder;
struct fsnotify_event *last_event;
int ret = 0;
/* and the list better be locked by something too */
spin_lock(&event->lock);
@@ -82,11 +80,13 @@ static int inotify_merge(struct list_head *list,
last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
last_event = last_holder->event;
if (event_compare(last_event, event))
ret = -EEXIST;
fsnotify_get_event(last_event);
else
last_event = NULL;
spin_unlock(&event->lock);
return ret;
return last_event;
}
static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
@@ -96,7 +96,8 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
struct inode *to_tell;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
int wd, ret;
struct fsnotify_event *added_event;
int wd, ret = 0;
pr_debug("%s: group=%p event=%p to_tell=%p mask=%x\n", __func__, group,
event, event->to_tell, event->mask);
@@ -120,14 +121,13 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
fsn_event_priv->group = group;
event_priv->wd = wd;
ret = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge, NULL);
if (ret) {
added_event = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge);
if (added_event) {
inotify_free_event_priv(fsn_event_priv);
/* EEXIST says we tail matched, EOVERFLOW isn't something
* to report up the stack. */
if ((ret == -EEXIST) ||
(ret == -EOVERFLOW))
ret = 0;
if (!IS_ERR(added_event))
fsnotify_put_event(added_event);
else
ret = PTR_ERR(added_event);
}
if (fsn_mark->mask & IN_ONESHOT)


@@ -516,7 +516,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group)
{
struct inotify_inode_mark *i_mark;
struct fsnotify_event *ignored_event;
struct fsnotify_event *ignored_event, *notify_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
int ret;
@@ -538,9 +538,14 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
fsn_event_priv->group = group;
event_priv->wd = i_mark->wd;
ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL, NULL);
if (ret)
notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
if (notify_event) {
if (IS_ERR(notify_event))
ret = PTR_ERR(notify_event);
else
fsnotify_put_event(notify_event);
inotify_free_event_priv(fsn_event_priv);
}
skip_send_ignore:


@@ -137,16 +137,14 @@ struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnot
* event off the queue to deal with. If the event is successfully added to the
* group's notification queue, a reference is taken on event.
*/
int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
struct fsnotify_event_private_data *priv,
int (*merge)(struct list_head *,
struct fsnotify_event *,
void **arg),
void **arg)
struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
struct fsnotify_event_private_data *priv,
struct fsnotify_event *(*merge)(struct list_head *,
struct fsnotify_event *))
{
struct fsnotify_event *return_event = NULL;
struct fsnotify_event_holder *holder = NULL;
struct list_head *list = &group->notification_list;
int rc = 0;
pr_debug("%s: group=%p event=%p priv=%p\n", __func__, group, event, priv);
@@ -162,27 +160,37 @@ int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_even
alloc_holder:
holder = fsnotify_alloc_event_holder();
if (!holder)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
}
mutex_lock(&group->notification_mutex);
if (group->q_len >= group->max_events) {
event = q_overflow_event;
rc = -EOVERFLOW;
/*
* we need to return the overflow event
* which means we need a ref
*/
fsnotify_get_event(event);
return_event = event;
/* sorry, no private data on the overflow event */
priv = NULL;
}
if (!list_empty(list) && merge) {
int ret;
struct fsnotify_event *tmp;
ret = merge(list, event, arg);
if (ret) {
tmp = merge(list, event);
if (tmp) {
mutex_unlock(&group->notification_mutex);
if (return_event)
fsnotify_put_event(return_event);
if (holder != &event->holder)
fsnotify_destroy_event_holder(holder);
return ret;
return tmp;
}
}
@@ -197,6 +205,12 @@ alloc_holder:
* event holder was used, go back and get a new one */
spin_unlock(&event->lock);
mutex_unlock(&group->notification_mutex);
if (return_event) {
fsnotify_put_event(return_event);
return_event = NULL;
}
goto alloc_holder;
}
@@ -211,7 +225,7 @@ alloc_holder:
mutex_unlock(&group->notification_mutex);
wake_up(&group->notification_waitq);
return rc;
return return_event;
}
/*


@@ -379,13 +379,11 @@ extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struc
struct fsnotify_event *event);
/* attach the event to the group notification queue */
extern int fsnotify_add_notify_event(struct fsnotify_group *group,
struct fsnotify_event *event,
struct fsnotify_event_private_data *priv,
int (*merge)(struct list_head *,
struct fsnotify_event *,
void **),
void **arg);
extern struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group,
struct fsnotify_event *event,
struct fsnotify_event_private_data *priv,
struct fsnotify_event *(*merge)(struct list_head *,
struct fsnotify_event *));
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */