
VFIO updates for v4.13-rc1

 - Include Intel XXV710 in INTx workaround (Alex Williamson)

 - Make use of ERR_CAST() for error return (Dan Carpenter)

 - Fix vfio_group release deadlock from iommu notifier (Alex Williamson)

 - Unset KVM-VFIO attributes only on group match (Alex Williamson)

 - Fix release path group/file matching with KVM-VFIO (Alex Williamson)

 - Remove unnecessary lock uses triggering lockdep splat (Alex Williamson)
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.14 (GNU/Linux)
 
 iQIcBAABAgAGBQJZZ71TAAoJECObm247sIsi1mUP/1/5kubhHRE/Y11SnX4Bpie0
 X6YVbV3WLGuV9jaFMI7EgYJLZ6pBvCRX0CLUzEdrbS4LAlTLQu2GSY5tqhcLJumE
 0mV1Wj1jOO9BAur13/sJ+IohbZeK10dtbTkWv+YhrVSpRaLP+ituaKsajEV12WRH
 6dxho2bqfvNcTC8yjE+vqe9mU3jXYPGuCx8oYGaDXEsCjzrdbOFnAG0/s/2cWpb+
 D1ADHd3020VAHZRnHRBLFfMczza1jqllhSAUfdMw1gRGCQDq3k1XenzVNLLTbtYy
 VmEWHa+R/OCfbKVxaDqPsgOTK7x7DKn+Pzb3lWCdQ8v5X+2ubHpVIYjxTDSSTbt3
 YJ7a+hNk8AHkFgwS7x8BdOT8mmNGb1NZldjS4dv2VWkfcTnMQnubnYSCzGztto9h
 P2THKBil6djPb9S3pCvtKUiHSIZedYZlKofUldrOdGDAZmzLLlf8lTzijGjDYKFM
 pQeZC+xeEhZXURipgkH+a+paYgDtKEfwSlABODghjCcJf7S/GbyVPLOKLXzoVb2y
 Ml8eGlo4O/cNniQK5faH447ilM7hzS1aG83uGnHTfe8VgKjI7Z5ZSxKOtoEq5bDz
 bb91E6GVLKHqT0LVS1YZfrnqK0hX/QAd/sK1REM5nN8JNmPLyLpjv8FaJEhpk1vC
 z4At5+pfKM8DYrW3EGmc
 =3A4K
 -----END PGP SIGNATURE-----

Merge tag 'vfio-v4.13-rc1' of git://github.com/awilliam/linux-vfio

Pull VFIO updates from Alex Williamson:

 - Include Intel XXV710 in INTx workaround (Alex Williamson)

 - Make use of ERR_CAST() for error return (Dan Carpenter)

 - Fix vfio_group release deadlock from iommu notifier (Alex Williamson)

 - Unset KVM-VFIO attributes only on group match (Alex Williamson)

 - Fix release path group/file matching with KVM-VFIO (Alex Williamson)

 - Remove unnecessary lock uses triggering lockdep splat (Alex Williamson)

* tag 'vfio-v4.13-rc1' of git://github.com/awilliam/linux-vfio:
  vfio: Remove unnecessary uses of vfio_container.group_lock
  vfio: New external user group/file match
  kvm-vfio: Decouple only when we match a group
  vfio: Fix group release deadlock
  vfio: Use ERR_CAST() instead of open coding it
  vfio/pci: Add Intel XXV710 to hidden INTx devices
Linus Torvalds 2017-07-13 12:23:54 -07:00
commit 8c6f5e7359
4 changed files with 75 additions and 57 deletions
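
To illustrate how a consumer is expected to use the new external-user interface added in this series (vfio_external_group_match_file(), declared in include/linux/vfio.h below), here is a short hypothetical sketch; it is not part of the series. It assumes a driver that already holds group references taken with vfio_group_get_external_user() and that links against vfio directly, whereas the in-tree KVM consumer goes through symbol_get(), as the virt/kvm/vfio.c hunk below shows. All example_* names are invented for illustration.

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/vfio.h>

/* Hypothetical bookkeeping for vfio groups a driver already holds. */
struct example_group_entry {
	struct list_head node;
	struct vfio_group *group;	/* from vfio_group_get_external_user() */
};

/*
 * On a "drop this group fd" request, match the incoming file against the
 * groups already held rather than calling vfio_group_get_external_user()
 * again, so the release path never takes (and then has to drop) an extra
 * group reference just to identify the group.
 */
static int example_del_group(struct list_head *held, struct file *filep)
{
	struct example_group_entry *entry;

	list_for_each_entry(entry, held, node) {
		if (!vfio_external_group_match_file(entry->group, filep))
			continue;

		list_del(&entry->node);
		vfio_group_put_external_user(entry->group);
		kfree(entry);
		return 0;
	}

	return -ENOENT;
}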

drivers/vfio/pci/vfio_pci.c

@@ -195,11 +195,11 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)
 	switch (pdev->vendor) {
 	case PCI_VENDOR_ID_INTEL:
 		switch (pdev->device) {
-		/* All i40e (XL710/X710) 10/20/40GbE NICs */
+		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
 		case 0x1572:
 		case 0x1574:
 		case 0x1580 ... 0x1581:
-		case 0x1583 ... 0x1589:
+		case 0x1583 ... 0x158b:
 		case 0x37d0 ... 0x37d2:
 			return true;
 		default:

drivers/vfio/vfio.c

@@ -382,7 +382,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 	if (IS_ERR(dev)) {
 		vfio_free_group_minor(minor);
 		vfio_group_unlock_and_free(group);
-		return (struct vfio_group *)dev; /* ERR_PTR */
+		return ERR_CAST(dev);
 	}
 
 	group->minor = minor;
@@ -423,6 +423,34 @@ static void vfio_group_put(struct vfio_group *group)
 	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
 }
 
+struct vfio_group_put_work {
+	struct work_struct work;
+	struct vfio_group *group;
+};
+
+static void vfio_group_put_bg(struct work_struct *work)
+{
+	struct vfio_group_put_work *do_work;
+
+	do_work = container_of(work, struct vfio_group_put_work, work);
+
+	vfio_group_put(do_work->group);
+	kfree(do_work);
+}
+
+static void vfio_group_schedule_put(struct vfio_group *group)
+{
+	struct vfio_group_put_work *do_work;
+
+	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
+	if (WARN_ON(!do_work))
+		return;
+
+	INIT_WORK(&do_work->work, vfio_group_put_bg);
+	do_work->group = group;
+	schedule_work(&do_work->work);
+}
+
 /* Assume group_lock or group reference is held */
 static void vfio_group_get(struct vfio_group *group)
 {
@@ -762,7 +790,14 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
 		break;
 	}
 
-	vfio_group_put(group);
+	/*
+	 * If we're the last reference to the group, the group will be
+	 * released, which includes unregistering the iommu group notifier.
+	 * We hold a read-lock on that notifier list, unregistering needs
+	 * a write-lock... deadlock.  Release our reference asynchronously
+	 * to avoid that situation.
+	 */
+	vfio_group_schedule_put(group);
 	return NOTIFY_OK;
 }
 
@@ -1140,15 +1175,11 @@ static long vfio_fops_unl_ioctl(struct file *filep,
 		ret = vfio_ioctl_set_iommu(container, arg);
 		break;
 	default:
-		down_read(&container->group_lock);
-
 		driver = container->iommu_driver;
 		data = container->iommu_data;
 
 		if (driver) /* passthrough all unrecognized ioctls */
 			ret = driver->ops->ioctl(data, cmd, arg);
-
-		up_read(&container->group_lock);
 	}
 
 	return ret;
@@ -1202,15 +1233,11 @@ static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
 	struct vfio_iommu_driver *driver;
 	ssize_t ret = -EINVAL;
 
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->read))
 		ret = driver->ops->read(container->iommu_data,
 					buf, count, ppos);
 
-	up_read(&container->group_lock);
-
 	return ret;
 }
@@ -1221,15 +1248,11 @@ static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
 	struct vfio_iommu_driver *driver;
 	ssize_t ret = -EINVAL;
 
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->write))
 		ret = driver->ops->write(container->iommu_data,
 					 buf, count, ppos);
 
-	up_read(&container->group_lock);
-
 	return ret;
 }
@@ -1239,14 +1262,10 @@ static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
 	struct vfio_iommu_driver *driver;
 	int ret = -EINVAL;
 
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->mmap))
 		ret = driver->ops->mmap(container->iommu_data, vma);
 
-	up_read(&container->group_lock);
-
 	return ret;
 }
@@ -1741,6 +1760,15 @@ void vfio_group_put_external_user(struct vfio_group *group)
 }
 EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
 
+bool vfio_external_group_match_file(struct vfio_group *test_group,
+				    struct file *filep)
+{
+	struct vfio_group *group = filep->private_data;
+
+	return (filep->f_op == &vfio_group_fops) && (group == test_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
+
 int vfio_external_user_iommu_id(struct vfio_group *group)
 {
 	return iommu_group_id(group->iommu_group);
@@ -1949,8 +1977,6 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
 		goto err_pin_pages;
 
 	container = group->container;
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->pin_pages))
 		ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
@@ -1958,7 +1984,6 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
 	else
 		ret = -ENOTTY;
 
-	up_read(&container->group_lock);
 	vfio_group_try_dissolve_container(group);
 
 err_pin_pages:
@@ -1998,8 +2023,6 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
 		goto err_unpin_pages;
 
 	container = group->container;
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->unpin_pages))
 		ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
@@ -2007,7 +2030,6 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
 	else
 		ret = -ENOTTY;
 
-	up_read(&container->group_lock);
 	vfio_group_try_dissolve_container(group);
 
 err_unpin_pages:
@@ -2029,8 +2051,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
 		return -EINVAL;
 
 	container = group->container;
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->register_notifier))
 		ret = driver->ops->register_notifier(container->iommu_data,
@@ -2038,7 +2058,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
 	else
 		ret = -ENOTTY;
 
-	up_read(&container->group_lock);
 	vfio_group_try_dissolve_container(group);
 
 	return ret;
@@ -2056,8 +2075,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
 		return -EINVAL;
 
 	container = group->container;
-	down_read(&container->group_lock);
-
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->unregister_notifier))
 		ret = driver->ops->unregister_notifier(container->iommu_data,
@@ -2065,7 +2082,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
 	else
 		ret = -ENOTTY;
 
-	up_read(&container->group_lock);
 	vfio_group_try_dissolve_container(group);
 
 	return ret;
@@ -2083,7 +2099,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
 					unsigned long *events,
 					struct notifier_block *nb)
 {
-	struct vfio_container *container;
 	int ret;
 	bool set_kvm = false;
 
@@ -2101,9 +2116,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
 	if (ret)
 		return -EINVAL;
 
-	container = group->container;
-	down_read(&container->group_lock);
-
 	ret = blocking_notifier_chain_register(&group->notifier, nb);
 
 	/*
@@ -2114,7 +2126,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
 		blocking_notifier_call_chain(&group->notifier,
 					VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
 
-	up_read(&container->group_lock);
 	vfio_group_try_dissolve_container(group);
 
 	return ret;
@@ -2123,19 +2134,14 @@ static int vfio_register_group_notifier(struct vfio_group *group,
 static int vfio_unregister_group_notifier(struct vfio_group *group,
 					struct notifier_block *nb)
 {
-	struct vfio_container *container;
 	int ret;
 
 	ret = vfio_group_add_container_user(group);
 	if (ret)
 		return -EINVAL;
 
-	container = group->container;
-	down_read(&container->group_lock);
-
 	ret = blocking_notifier_chain_unregister(&group->notifier, nb);
 
-	up_read(&container->group_lock);
-
 	vfio_group_try_dissolve_container(group);
 
 	return ret;

include/linux/vfio.h

@@ -97,6 +97,8 @@ extern void vfio_unregister_iommu_driver(
  */
 extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
 extern void vfio_group_put_external_user(struct vfio_group *group);
+extern bool vfio_external_group_match_file(struct vfio_group *group,
+					   struct file *filep);
 extern int vfio_external_user_iommu_id(struct vfio_group *group);
 extern long vfio_external_check_extension(struct vfio_group *group,
 					  unsigned long arg);

virt/kvm/vfio.c

@@ -51,6 +51,22 @@ static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
 	return vfio_group;
 }
 
+static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
+					       struct file *filep)
+{
+	bool ret, (*fn)(struct vfio_group *, struct file *);
+
+	fn = symbol_get(vfio_external_group_match_file);
+	if (!fn)
+		return false;
+
+	ret = fn(group, filep);
+
+	symbol_put(vfio_external_group_match_file);
+
+	return ret;
+}
+
 static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
 {
 	void (*fn)(struct vfio_group *);
@@ -231,37 +247,31 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 		if (!f.file)
 			return -EBADF;
 
-		vfio_group = kvm_vfio_group_get_external_user(f.file);
-		fdput(f);
-
-		if (IS_ERR(vfio_group))
-			return PTR_ERR(vfio_group);
-
 		ret = -ENOENT;
 
 		mutex_lock(&kv->lock);
 
 		list_for_each_entry(kvg, &kv->group_list, node) {
-			if (kvg->vfio_group != vfio_group)
+			if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
+								f.file))
 				continue;
 
 			list_del(&kvg->node);
+			kvm_arch_end_assignment(dev->kvm);
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+			kvm_spapr_tce_release_vfio_group(dev->kvm,
+							 kvg->vfio_group);
+#endif
+			kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
 			kvm_vfio_group_put_external_user(kvg->vfio_group);
 			kfree(kvg);
 			ret = 0;
 			break;
 		}
 
-		kvm_arch_end_assignment(dev->kvm);
 		mutex_unlock(&kv->lock);
 
-#ifdef CONFIG_SPAPR_TCE_IOMMU
-		kvm_spapr_tce_release_vfio_group(dev->kvm, vfio_group);
-#endif
-		kvm_vfio_group_set_kvm(vfio_group, NULL);
-		kvm_vfio_group_put_external_user(vfio_group);
+		fdput(f);
 
 		kvm_vfio_update_coherency(dev);