diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 2ec6c31d0ab0..5277c6c2fa72 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -466,6 +466,44 @@ out:
         vfio_pci_set_power_state(vdev, PCI_D3hot);
 }
 
+static struct pci_driver vfio_pci_driver;
+
+static struct vfio_pci_device *get_pf_vdev(struct vfio_pci_device *vdev,
+                                           struct vfio_device **pf_dev)
+{
+        struct pci_dev *physfn = pci_physfn(vdev->pdev);
+
+        if (!vdev->pdev->is_virtfn)
+                return NULL;
+
+        *pf_dev = vfio_device_get_from_dev(&physfn->dev);
+        if (!*pf_dev)
+                return NULL;
+
+        if (pci_dev_driver(physfn) != &vfio_pci_driver) {
+                vfio_device_put(*pf_dev);
+                return NULL;
+        }
+
+        return vfio_device_data(*pf_dev);
+}
+
+static void vfio_pci_vf_token_user_add(struct vfio_pci_device *vdev, int val)
+{
+        struct vfio_device *pf_dev;
+        struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev, &pf_dev);
+
+        if (!pf_vdev)
+                return;
+
+        mutex_lock(&pf_vdev->vf_token->lock);
+        pf_vdev->vf_token->users += val;
+        WARN_ON(pf_vdev->vf_token->users < 0);
+        mutex_unlock(&pf_vdev->vf_token->lock);
+
+        vfio_device_put(pf_dev);
+}
+
 static void vfio_pci_release(void *device_data)
 {
         struct vfio_pci_device *vdev = device_data;
@@ -473,6 +511,7 @@ static void vfio_pci_release(void *device_data)
         mutex_lock(&vdev->reflck->lock);
 
         if (!(--vdev->refcnt)) {
+                vfio_pci_vf_token_user_add(vdev, -1);
                 vfio_spapr_pci_eeh_release(vdev->pdev);
                 vfio_pci_disable(vdev);
         }
@@ -498,6 +537,7 @@ static int vfio_pci_open(void *device_data)
                         goto error;
 
                 vfio_spapr_pci_eeh_open(vdev->pdev);
+                vfio_pci_vf_token_user_add(vdev, 1);
         }
         vdev->refcnt++;
 error:
@@ -1278,11 +1318,148 @@ static void vfio_pci_request(void *device_data, unsigned int count)
         mutex_unlock(&vdev->igate);
 }
 
+static int vfio_pci_validate_vf_token(struct vfio_pci_device *vdev,
+                                      bool vf_token, uuid_t *uuid)
+{
+        /*
+         * There's always some degree of trust or collaboration between SR-IOV
+         * PF and VFs, even if just that the PF hosts the SR-IOV capability and
+         * can disrupt VFs with a reset, but often the PF has more explicit
+         * access to deny service to the VF or access data passed through the
+         * VF. We therefore require an opt-in via a shared VF token (UUID) to
+         * represent this trust. This both prevents a VF driver from assuming
+         * the PF driver is a trusted, in-kernel driver, and guards against
+         * the PF driver being replaced with a rogue driver, unknown to in-use
+         * VF drivers.
+         *
+         * Therefore when presented with a VF, if the PF is a vfio device and
+         * it is bound to the vfio-pci driver, the user needs to provide a VF
+         * token to access the device, in the form of appending a vf_token to
+         * the device name, for example:
+         *
+         * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
+         *
+         * When presented with a PF which has VFs in use, the user must also
+         * provide the current VF token to prove collaboration with existing
+         * VF users. If VFs are not in use, the VF token provided for the PF
+         * device will act to set the VF token.
+         *
+         * If the VF token is provided but unused, an error is generated.
+         */
+        if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
+                return 0; /* No VF token provided or required */
+
+        if (vdev->pdev->is_virtfn) {
+                struct vfio_device *pf_dev;
+                struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev, &pf_dev);
+                bool match;
+
+                if (!pf_vdev) {
+                        if (!vf_token)
+                                return 0; /* PF is not vfio-pci, no VF token */
+
+                        pci_info_ratelimited(vdev->pdev,
+                                "VF token incorrectly provided, PF not bound to vfio-pci\n");
+                        return -EINVAL;
+                }
+
+                if (!vf_token) {
+                        vfio_device_put(pf_dev);
+                        pci_info_ratelimited(vdev->pdev,
+                                "VF token required to access device\n");
+                        return -EACCES;
+                }
+
+                mutex_lock(&pf_vdev->vf_token->lock);
+                match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
+                mutex_unlock(&pf_vdev->vf_token->lock);
+
+                vfio_device_put(pf_dev);
+
+                if (!match) {
+                        pci_info_ratelimited(vdev->pdev,
+                                "Incorrect VF token provided for device\n");
+                        return -EACCES;
+                }
+        } else if (vdev->vf_token) {
+                mutex_lock(&vdev->vf_token->lock);
+                if (vdev->vf_token->users) {
+                        if (!vf_token) {
+                                mutex_unlock(&vdev->vf_token->lock);
+                                pci_info_ratelimited(vdev->pdev,
+                                        "VF token required to access device\n");
+                                return -EACCES;
+                        }
+
+                        if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
+                                mutex_unlock(&vdev->vf_token->lock);
+                                pci_info_ratelimited(vdev->pdev,
+                                        "Incorrect VF token provided for device\n");
+                                return -EACCES;
+                        }
+                } else if (vf_token) {
+                        uuid_copy(&vdev->vf_token->uuid, uuid);
+                }
+
+                mutex_unlock(&vdev->vf_token->lock);
+        } else if (vf_token) {
+                pci_info_ratelimited(vdev->pdev,
+                        "VF token incorrectly provided, not a PF or VF\n");
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+#define VF_TOKEN_ARG "vf_token="
+
 static int vfio_pci_match(void *device_data, char *buf)
 {
         struct vfio_pci_device *vdev = device_data;
+        bool vf_token = false;
+        uuid_t uuid;
+        int ret;
 
-        return !strcmp(pci_name(vdev->pdev), buf);
+        if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
+                return 0; /* No match */
+
+        if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
+                buf += strlen(pci_name(vdev->pdev));
+
+                if (*buf != ' ')
+                        return 0; /* No match: non-whitespace after name */
+
+                while (*buf) {
+                        if (*buf == ' ') {
+                                buf++;
+                                continue;
+                        }
+
+                        if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
+                                                  strlen(VF_TOKEN_ARG))) {
+                                buf += strlen(VF_TOKEN_ARG);
+
+                                if (strlen(buf) < UUID_STRING_LEN)
+                                        return -EINVAL;
+
+                                ret = uuid_parse(buf, &uuid);
+                                if (ret)
+                                        return ret;
+
+                                vf_token = true;
+                                buf += UUID_STRING_LEN;
+                        } else {
+                                /* Unknown/duplicate option */
+                                return -EINVAL;
+                        }
+                }
+        }
+
+        ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
+        if (ret)
+                return ret;
+
+        return 1; /* Match */
 }
 
 static const struct vfio_device_ops vfio_pci_ops = {
@@ -1354,6 +1531,19 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                 return ret;
         }
 
+        if (pdev->is_physfn) {
+                vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
+                if (!vdev->vf_token) {
+                        vfio_pci_reflck_put(vdev->reflck);
+                        vfio_del_group_dev(&pdev->dev);
+                        vfio_iommu_group_put(group, &pdev->dev);
+                        kfree(vdev);
+                        return -ENOMEM;
+                }
+                mutex_init(&vdev->vf_token->lock);
+                uuid_gen(&vdev->vf_token->uuid);
+        }
+
         if (vfio_pci_is_vga(pdev)) {
                 vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
                 vga_set_legacy_decoding(pdev,
@@ -1387,6 +1577,12 @@ static void vfio_pci_remove(struct pci_dev *pdev)
         if (!vdev)
                 return;
 
+        if (vdev->vf_token) {
+                WARN_ON(vdev->vf_token->users);
+                mutex_destroy(&vdev->vf_token->lock);
+                kfree(vdev->vf_token);
+        }
+
         vfio_pci_reflck_put(vdev->reflck);
 
         vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 8a2c7607d513..76c11c915949 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/irqbypass.h>
 #include <linux/types.h>
+#include <linux/uuid.h>
 
 #ifndef VFIO_PCI_PRIVATE_H
 #define VFIO_PCI_PRIVATE_H
@@ -84,6 +85,12 @@ struct vfio_pci_reflck {
         struct mutex lock;
 };
 
+struct vfio_pci_vf_token {
+        struct mutex lock;
+        uuid_t uuid;
+        int users;
+};
+
 struct vfio_pci_device {
         struct pci_dev *pdev;
         void __iomem *barmap[PCI_STD_NUM_BARS];
@@ -122,6 +129,7 @@ struct vfio_pci_device {
         struct list_head dummy_resources_list;
         struct mutex ioeventfds_lock;
         struct list_head ioeventfds_list;
+        struct vfio_pci_vf_token *vf_token;
 };
 
 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
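
For context, a minimal userspace sketch (not part of the patch) of how a VF user would present the token that the new vfio_pci_match()/vfio_pci_validate_vf_token() path expects. It assumes the VF and its PF are both bound to vfio-pci and that the UUID shown is the PF's current token; the /dev/vfio/26 group number is a placeholder, and the BDF and UUID are simply the example values from the comment in the patch.

/*
 * Hypothetical VF-user sketch: open a VF whose PF is also bound to
 * vfio-pci, presenting the shared VF token.  Group number, BDF and
 * UUID are placeholders; error handling and DMA setup are minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int main(void)
{
        int container_fd, group_fd, device_fd;

        container_fd = open("/dev/vfio/vfio", O_RDWR);
        group_fd = open("/dev/vfio/26", O_RDWR);        /* the VF's IOMMU group */
        if (container_fd < 0 || group_fd < 0) {
                perror("open");
                return 1;
        }

        /* Standard VFIO setup: attach the group and pick an IOMMU model. */
        if (ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd) ||
            ioctl(container_fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU)) {
                perror("container setup");
                return 1;
        }

        /*
         * The patch-specific part: vfio_pci_match() accepts a "vf_token="
         * suffix on the device name, and vfio_pci_validate_vf_token()
         * requires the UUID to equal the token held by the vfio-pci PF.
         */
        device_fd = ioctl(group_fd, VFIO_GROUP_GET_DEVICE_FD,
                          "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3");
        if (device_fd < 0) {
                perror("VFIO_GROUP_GET_DEVICE_FD");
                return 1;
        }

        return 0;
}

A PF user interacts with the token the same way: while its VFs are in use it must append the current vf_token= value to the PF's device name, and while no VFs are in use the token it supplies becomes the new VF token, per the comment in vfio_pci_validate_vf_token() above.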