1
0
Fork 0

IB/uverbs: Move non driver related elements from ib_ucontext to ib_ufile

The IDR is part of the ib_ufile, so all of the machinery to lock it and to
handle closing and disassociation rightly belongs to the ufile, not the
ucontext.

This changes the lifetime of that data to match the lifetime of the file
descriptor, which is always strictly longer than the lifetime of the
ucontext.

We need the entire locking machinery to continue to exist after ucontext
destruction so that we can return the destroy data after a device has been
disassociated.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
hifive-unleashed-5.1
Jason Gunthorpe 2018-07-04 11:32:07 +03:00
parent c33e73af21
commit 6a5e9c8841
6 changed files with 49 additions and 46 deletions

View File

@ -161,6 +161,7 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
* user_handle should be filled by the handler,
* The object is added to the list in the commit stage.
*/
uobj->ufile = context->ufile;
uobj->context = context;
uobj->type = type;
/*
@ -286,7 +287,7 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
ret = uverbs_try_lock_object(uobj, exclusive);
if (ret) {
WARN(ucontext->cleanup_reason,
WARN(uobj->ufile->cleanup_reason,
"ib_uverbs: Trying to lookup_get while cleanup context\n");
goto free;
}
@ -441,8 +442,8 @@ static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
enum rdma_remove_reason why)
{
struct ib_uverbs_file *ufile = uobj->ufile;
int ret;
struct ib_ucontext *ucontext = uobj->context;
ret = uobj->type->type_class->remove_commit(uobj, why);
if (ib_is_destroy_retryable(ret, why, uobj)) {
@ -450,9 +451,9 @@ static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
atomic_set(&uobj->usecnt, 0);
uobj->type->type_class->lookup_put(uobj, true);
} else {
mutex_lock(&ucontext->uobjects_lock);
mutex_lock(&ufile->uobjects_lock);
list_del(&uobj->list);
mutex_unlock(&ucontext->uobjects_lock);
mutex_unlock(&ufile->uobjects_lock);
/* put the ref we took when we created the object */
uverbs_uobject_put(uobj);
}
@ -464,19 +465,19 @@ static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
{
int ret;
struct ib_ucontext *ucontext = uobj->context;
struct ib_uverbs_file *ufile = uobj->ufile;
/* put the ref count we took at lookup_get */
uverbs_uobject_put(uobj);
/* Cleanup is running. Calling this should have been impossible */
if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
if (!down_read_trylock(&ufile->cleanup_rwsem)) {
WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
return 0;
}
assert_uverbs_usecnt(uobj, true);
ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
up_read(&ucontext->cleanup_rwsem);
up_read(&ufile->cleanup_rwsem);
return ret;
}
@ -496,10 +497,10 @@ static const struct uverbs_obj_type null_obj_type = {
int rdma_explicit_destroy(struct ib_uobject *uobject)
{
int ret;
struct ib_ucontext *ucontext = uobject->context;
struct ib_uverbs_file *ufile = uobject->ufile;
/* Cleanup is running. Calling this should have been impossible */
if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
if (!down_read_trylock(&ufile->cleanup_rwsem)) {
WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
return 0;
}
@ -512,7 +513,7 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
uobject->type = &null_obj_type;
out:
up_read(&ucontext->cleanup_rwsem);
up_read(&ufile->cleanup_rwsem);
return ret;
}
@ -542,8 +543,10 @@ static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
{
struct ib_uverbs_file *ufile = uobj->ufile;
/* Cleanup is running. Calling this should have been impossible */
if (!down_read_trylock(&uobj->context->cleanup_rwsem)) {
if (!down_read_trylock(&ufile->cleanup_rwsem)) {
int ret;
WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
@ -559,12 +562,12 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
assert_uverbs_usecnt(uobj, true);
atomic_set(&uobj->usecnt, 0);
mutex_lock(&uobj->context->uobjects_lock);
list_add(&uobj->list, &uobj->context->uobjects);
mutex_unlock(&uobj->context->uobjects_lock);
mutex_lock(&ufile->uobjects_lock);
list_add(&uobj->list, &ufile->uobjects);
mutex_unlock(&ufile->uobjects_lock);
uobj->type->type_class->alloc_commit(uobj);
up_read(&uobj->context->cleanup_rwsem);
up_read(&ufile->cleanup_rwsem);
return 0;
}
@ -638,20 +641,18 @@ EXPORT_SYMBOL(uverbs_idr_class);
static void _uverbs_close_fd(struct ib_uobject_file *uobj_file)
{
struct ib_ucontext *ucontext;
struct ib_uverbs_file *ufile = uobj_file->ufile;
int ret;
mutex_lock(&uobj_file->ufile->cleanup_mutex);
mutex_lock(&ufile->cleanup_mutex);
/* uobject was either already cleaned up or is cleaned up right now anyway */
if (!uobj_file->uobj.context ||
!down_read_trylock(&uobj_file->uobj.context->cleanup_rwsem))
!down_read_trylock(&ufile->cleanup_rwsem))
goto unlock;
ucontext = uobj_file->uobj.context;
ret = _rdma_remove_commit_uobject(&uobj_file->uobj, RDMA_REMOVE_CLOSE);
up_read(&ucontext->cleanup_rwsem);
up_read(&ufile->cleanup_rwsem);
if (ret)
pr_warn("uverbs: unable to clean up uobject file in uverbs_close_fd.\n");
unlock:
@ -671,6 +672,7 @@ void uverbs_close_fd(struct file *f)
static int __uverbs_cleanup_ucontext(struct ib_ucontext *ucontext,
enum rdma_remove_reason reason)
{
struct ib_uverbs_file *ufile = ucontext->ufile;
struct ib_uobject *obj, *next_obj;
int ret = -EINVAL;
int err = 0;
@ -684,9 +686,9 @@ static int __uverbs_cleanup_ucontext(struct ib_ucontext *ucontext,
* We take and release the lock per traversal in order to let
* other threads (which might still use the FDs) chance to run.
*/
mutex_lock(&ucontext->uobjects_lock);
ucontext->cleanup_reason = reason;
list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects, list) {
mutex_lock(&ufile->uobjects_lock);
ufile->cleanup_reason = reason;
list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
/*
* if we hit this WARN_ON, that means we are
* racing with a lookup_get.
@ -710,7 +712,7 @@ static int __uverbs_cleanup_ucontext(struct ib_ucontext *ucontext,
uverbs_uobject_put(obj);
ret = 0;
}
mutex_unlock(&ucontext->uobjects_lock);
mutex_unlock(&ufile->uobjects_lock);
return ret;
}
@ -719,14 +721,16 @@ void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
enum rdma_remove_reason reason = device_removed ?
RDMA_REMOVE_DRIVER_REMOVE :
RDMA_REMOVE_CLOSE;
struct ib_uverbs_file *ufile = ucontext->ufile;
/*
* Waits for all remove_commit and alloc_commit to finish. Logically, We
* want to hold this forever as the context is going to be destroyed,
* but we'll release it since it causes a "held lock freed" BUG message.
*/
down_write(&ucontext->cleanup_rwsem);
ucontext->cleanup_retryable = true;
while (!list_empty(&ucontext->uobjects))
down_write(&ufile->cleanup_rwsem);
ufile->ucontext->cleanup_retryable = true;
while (!list_empty(&ufile->uobjects))
if (__uverbs_cleanup_ucontext(ucontext, reason)) {
/*
* No entry was cleaned-up successfully during this
@ -735,19 +739,11 @@ void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
break;
}
ucontext->cleanup_retryable = false;
if (!list_empty(&ucontext->uobjects))
ufile->ucontext->cleanup_retryable = false;
if (!list_empty(&ufile->uobjects))
__uverbs_cleanup_ucontext(ucontext, reason);
up_write(&ucontext->cleanup_rwsem);
}
void uverbs_initialize_ucontext(struct ib_ucontext *ucontext)
{
ucontext->cleanup_reason = 0;
mutex_init(&ucontext->uobjects_lock);
INIT_LIST_HEAD(&ucontext->uobjects);
init_rwsem(&ucontext->cleanup_rwsem);
up_write(&ufile->cleanup_rwsem);
}
const struct uverbs_obj_type_class uverbs_fd_class = {

View File

@ -56,7 +56,6 @@ const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_sp
* cleanup_ucontext removes all uobjects from the context and puts them.
*/
void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed);
void uverbs_initialize_ucontext(struct ib_ucontext *ucontext);
/*
* uverbs_uobject_get is called in order to increase the reference count on

View File

@ -145,6 +145,14 @@ struct ib_uverbs_file {
struct list_head list;
int is_closed;
/* locking the uobjects_list */
struct mutex uobjects_lock;
struct list_head uobjects;
/* protects cleanup process from other actions */
struct rw_semaphore cleanup_rwsem;
enum rdma_remove_reason cleanup_reason;
struct idr idr;
/* spinlock protects write access to idr */
spinlock_t idr_lock;

View File

@ -110,7 +110,6 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
ucontext->cg_obj = cg_obj;
/* ufile is required when some objects are released */
ucontext->ufile = file;
uverbs_initialize_ucontext(ucontext);
rcu_read_lock();
ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);

View File

@ -888,6 +888,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
mutex_init(&file->mutex);
mutex_init(&file->cleanup_mutex);
mutex_init(&file->uobjects_lock);
INIT_LIST_HEAD(&file->uobjects);
init_rwsem(&file->cleanup_rwsem);
filp->private_data = file;
kobject_get(&dev->kobj);
list_add_tail(&file->list, &dev->uverbs_file_list);

View File

@ -1500,12 +1500,6 @@ struct ib_ucontext {
struct ib_uverbs_file *ufile;
int closing;
/* locking the uobjects_list */
struct mutex uobjects_lock;
struct list_head uobjects;
/* protects cleanup process from other actions */
struct rw_semaphore cleanup_rwsem;
enum rdma_remove_reason cleanup_reason;
bool cleanup_retryable;
struct pid *tgid;
@ -1531,6 +1525,9 @@ struct ib_ucontext {
struct ib_uobject {
u64 user_handle; /* handle given to us by userspace */
/* ufile & ucontext owning this object */
struct ib_uverbs_file *ufile;
/* FIXME, save memory: ufile->context == context */
struct ib_ucontext *context; /* associated user context */
void *object; /* containing object */
struct list_head list; /* link to context's list */