
vfio Updates for v3.11

Largely hugepage support for vfio/type1 iommu and surrounding cleanups and fixes.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.13 (GNU/Linux)
 
 iQIcBAABAgAGBQJR2uNvAAoJECObm247sIsiJRYQAJK15MfXgJq2PtBABNvFUAOG
 nqUvLgBgM5Ow1NI0Rzh9jkNohNqCvXDFGaWXXnsaX83hIpi59GFK31W2E3SiFCj3
 xISA9SUnm7Kjt9LAF6HTNz805zBkshIOk4MCx6HlezVWSRlWwT3rZzI4dI2fMvl8
 iPRk1Ion3QSQui99HWfXv/rtezAIzgZqsFqPC6DjWRfN7LcdEtKtcQwnrSb5GGY9
 3TIRY9IRYTSfJ2yjSz5f5258JxoDG5sR8dTMkgG2Gm92iGvGcPGpzQWPzVc4t+TO
 PdTqtv9ftEyAJKsYTFjPIod8XbzJBa1FSPadVAIfwF0JCDcsSFjoWGp+RzMQQSF8
 MK3VsnQ/pqJfs2nJHDQbWbKu0qWYPntvOCdojZ4679ceDTd0t515npfYeDQuX8yU
 fAA5rB46mDXjyxikTP574NdnkcGjbAj7EOCp7s+WTsVPGQQ3mId/3fQw0Wg7bE6v
 jaJqdRj70SNTRHs8DFLQhvSZgpef4RzepE4sRBZqzY4vWd4riNcAC3Got+F2rQy3
 X4hcHHU/5LGLoGMxOJQmuBfKVM8RAgikq6w2RfttVMLeKCknKtJ29OnotKilvILh
 W8nAOGxRnkmONFfHakNJtLl5tQJ4FQXc2cG8OeIIhHgheJjUxL72/zv8bBxOo7rY
 jUBjtZ5riQXc/ck4FEGI
 =9+Jh
 -----END PGP SIGNATURE-----

Merge tag 'vfio-v3.11' of git://github.com/awilliam/linux-vfio

Pull vfio updates from Alex Williamson:
 "Largely hugepage support for vfio/type1 iommu and surrounding cleanups
  and fixes"

* tag 'vfio-v3.11' of git://github.com/awilliam/linux-vfio:
  vfio/type1: Fix leak on error path
  vfio: Limit group opens
  vfio/type1: Fix missed frees and zero sized removes
  vfio: fix documentation
  vfio: Provide module option to disable vfio_iommu_type1 hugepage support
  vfio: hugepage support for vfio_iommu_type1
  vfio: Convert type1 iommu to use rbtree
Linus Torvalds 2013-07-10 14:50:08 -07:00
commit 15a49b9a90
4 changed files with 433 additions and 239 deletions

Documentation/vfio.txt

@@ -172,12 +172,12 @@ group and can access them as follows:
struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
/* Create a new container */
container = open("/dev/vfio/vfio, O_RDWR);
container = open("/dev/vfio/vfio", O_RDWR);
if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
/* Unknown API version */
if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_X86_IOMMU))
if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
/* Doesn't support the IOMMU driver we want. */
/* Open the group */
@@ -193,7 +193,7 @@ group and can access them as follows:
ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
/* Enable the IOMMU model we want */
ioctl(container, VFIO_SET_IOMMU, VFIO_X86_IOMMU)
ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU)
/* Get additional IOMMU info */
ioctl(container, VFIO_IOMMU_GET_INFO, &iommu_info);
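
Taken together, the corrected calls above form a complete minimal sequence. The sketch below wires them up and finishes with a DMA mapping; the group number 26, the 1MB anonymous buffer, and IOVA 0 are illustrative values, not taken from the patch:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

int main(void)
{
	int container, group;
	struct vfio_iommu_type1_info iommu_info = { .argsz = sizeof(iommu_info) };
	struct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(dma_map) };

	container = open("/dev/vfio/vfio", O_RDWR);
	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
		return 1;	/* Unknown API version */
	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
		return 1;	/* Doesn't support the IOMMU driver we want */

	group = open("/dev/vfio/26", O_RDWR);	/* group number is illustrative */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
	ioctl(container, VFIO_IOMMU_GET_INFO, &iommu_info);

	/* Map 1MB of anonymous memory at IOVA 0 for device DMA */
	dma_map.vaddr = (unsigned long)mmap(NULL, 1024 * 1024,
					    PROT_READ | PROT_WRITE,
					    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	dma_map.size = 1024 * 1024;
	dma_map.iova = 0;
	dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	return ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);
}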

drivers/vfio/vfio.c

@@ -76,6 +76,7 @@ struct vfio_group {
struct notifier_block nb;
struct list_head vfio_next;
struct list_head container_next;
atomic_t opened;
};
struct vfio_device {
@@ -206,6 +207,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
INIT_LIST_HEAD(&group->device_list);
mutex_init(&group->device_lock);
atomic_set(&group->container_users, 0);
atomic_set(&group->opened, 0);
group->iommu_group = iommu_group;
group->nb.notifier_call = vfio_iommu_group_notifier;
@@ -1236,12 +1238,22 @@ static long vfio_group_fops_compat_ioctl(struct file *filep,
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
struct vfio_group *group;
int opened;
group = vfio_group_get_from_minor(iminor(inode));
if (!group)
return -ENODEV;
/* Do we need multiple instances of the group open? Seems not. */
opened = atomic_cmpxchg(&group->opened, 0, 1);
if (opened) {
vfio_group_put(group);
return -EBUSY;
}
/* Is something still in use from a previous open? */
if (group->container) {
atomic_dec(&group->opened);
vfio_group_put(group);
return -EBUSY;
}
@@ -1259,6 +1271,8 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
vfio_group_try_dissolve_container(group);
atomic_dec(&group->opened);
vfio_group_put(group);
return 0;
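
With the new opened counter, a group chardev can only be held open by one instance at a time; a second open() of the same group now fails with EBUSY. A minimal userspace check (group number illustrative, assuming nothing else holds the group):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/* group number is illustrative; use a group bound to a vfio driver */
	int fd1 = open("/dev/vfio/26", O_RDWR);
	int fd2 = open("/dev/vfio/26", O_RDWR);

	if (fd1 >= 0 && fd2 < 0 && errno == EBUSY)
		printf("second open rejected with EBUSY, as expected\n");
	return 0;
}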

drivers/vfio/vfio_iommu_type1.c

@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h> /* pci_bus_type */
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -47,19 +48,25 @@ module_param_named(allow_unsafe_interrupts,
MODULE_PARM_DESC(allow_unsafe_interrupts,
"Enable VFIO IOMMU support for on platforms without interrupt remapping support.");
static bool disable_hugepages;
module_param_named(disable_hugepages,
disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
"Disable VFIO IOMMU support for IOMMU hugepages.");
struct vfio_iommu {
struct iommu_domain *domain;
struct mutex lock;
struct list_head dma_list;
struct rb_root dma_list;
struct list_head group_list;
bool cache;
};
struct vfio_dma {
struct list_head next;
struct rb_node node;
dma_addr_t iova; /* Device address */
unsigned long vaddr; /* Process virtual addr */
long npage; /* Number of pages */
size_t size; /* Map size (bytes) */
int prot; /* IOMMU_READ/WRITE */
};
@@ -73,7 +80,48 @@ struct vfio_group {
* into DMA'ble space using the IOMMU
*/
#define NPAGE_TO_SIZE(npage) ((size_t)(npage) << PAGE_SHIFT)
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
dma_addr_t start, size_t size)
{
struct rb_node *node = iommu->dma_list.rb_node;
while (node) {
struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
if (start + size <= dma->iova)
node = node->rb_left;
else if (start >= dma->iova + dma->size)
node = node->rb_right;
else
return dma;
}
return NULL;
}
static void vfio_insert_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
struct vfio_dma *dma;
while (*link) {
parent = *link;
dma = rb_entry(parent, struct vfio_dma, node);
if (new->iova + new->size <= dma->iova)
link = &(*link)->rb_left;
else
link = &(*link)->rb_right;
}
rb_link_node(&new->node, parent, link);
rb_insert_color(&new->node, &iommu->dma_list);
}
static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
rb_erase(&old->node, &iommu->dma_list);
}
struct vwork {
struct mm_struct *mm;
@@ -100,8 +148,8 @@ static void vfio_lock_acct(long npage)
struct vwork *vwork;
struct mm_struct *mm;
if (!current->mm)
return; /* process exited */
if (!current->mm || !npage)
return; /* process exited or nothing to do */
if (down_write_trylock(&current->mm->mmap_sem)) {
current->mm->locked_vm += npage;
@@ -173,33 +221,6 @@ static int put_pfn(unsigned long pfn, int prot)
return 0;
}
/* Unmap DMA region */
static long __vfio_dma_do_unmap(struct vfio_iommu *iommu, dma_addr_t iova,
long npage, int prot)
{
long i, unlocked = 0;
for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
unsigned long pfn;
pfn = iommu_iova_to_phys(iommu->domain, iova) >> PAGE_SHIFT;
if (pfn) {
iommu_unmap(iommu->domain, iova, PAGE_SIZE);
unlocked += put_pfn(pfn, prot);
}
}
return unlocked;
}
static void vfio_dma_unmap(struct vfio_iommu *iommu, dma_addr_t iova,
long npage, int prot)
{
long unlocked;
unlocked = __vfio_dma_do_unmap(iommu, iova, npage, prot);
vfio_lock_acct(-unlocked);
}
static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
{
struct page *page[1];
@@ -226,198 +247,306 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
return ret;
}
/* Map DMA region */
static int __vfio_dma_map(struct vfio_iommu *iommu, dma_addr_t iova,
unsigned long vaddr, long npage, int prot)
/*
* Attempt to pin pages. We really don't want to track all the pfns and
* the iommu can only map chunks of consecutive pfns anyway, so get the
* first page and all consecutive pages with the same locking.
*/
static long vfio_pin_pages(unsigned long vaddr, long npage,
int prot, unsigned long *pfn_base)
{
dma_addr_t start = iova;
long i, locked = 0;
int ret;
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
bool lock_cap = capable(CAP_IPC_LOCK);
long ret, i;
/* Verify that pages are not already mapped */
for (i = 0; i < npage; i++, iova += PAGE_SIZE)
if (iommu_iova_to_phys(iommu->domain, iova))
return -EBUSY;
if (!current->mm)
return -ENODEV;
iova = start;
ret = vaddr_get_pfn(vaddr, prot, pfn_base);
if (ret)
return ret;
if (iommu->cache)
prot |= IOMMU_CACHE;
if (is_invalid_reserved_pfn(*pfn_base))
return 1;
/*
* XXX We break mappings into pages and use get_user_pages_fast to
* pin the pages in memory. It's been suggested that mlock might
* provide a more efficient mechanism, but nothing prevents the
* user from munlocking the pages, which could then allow the user
* access to random host memory. We also have no guarantee from the
* IOMMU API that the iommu driver can unmap sub-pages of previous
* mappings. This means we might lose an entire range if a single
* page within it is unmapped. Single page mappings are inefficient,
* but provide the most flexibility for now.
*/
for (i = 0; i < npage; i++, iova += PAGE_SIZE, vaddr += PAGE_SIZE) {
if (!lock_cap && current->mm->locked_vm + 1 > limit) {
put_pfn(*pfn_base, prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
limit << PAGE_SHIFT);
return -ENOMEM;
}
if (unlikely(disable_hugepages)) {
vfio_lock_acct(1);
return 1;
}
/* Lock all the consecutive pages from pfn_base */
for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
unsigned long pfn = 0;
ret = vaddr_get_pfn(vaddr, prot, &pfn);
if (ret) {
__vfio_dma_do_unmap(iommu, start, i, prot);
return ret;
if (ret)
break;
if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
put_pfn(pfn, prot);
break;
}
/*
* Only add actual locked pages to accounting
* XXX We're effectively marking a page locked for every
* IOVA page even though it's possible the user could be
* backing multiple IOVAs with the same vaddr. This over-
* penalizes the user process, but we currently have no
* easy way to do this properly.
*/
if (!is_invalid_reserved_pfn(pfn))
locked++;
ret = iommu_map(iommu->domain, iova,
(phys_addr_t)pfn << PAGE_SHIFT,
PAGE_SIZE, prot);
if (ret) {
/* Back out mappings on error */
if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
put_pfn(pfn, prot);
__vfio_dma_do_unmap(iommu, start, i, prot);
return ret;
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
__func__, limit << PAGE_SHIFT);
break;
}
}
vfio_lock_acct(locked);
vfio_lock_acct(i);
return i;
}
static long vfio_unpin_pages(unsigned long pfn, long npage,
int prot, bool do_accounting)
{
unsigned long unlocked = 0;
long i;
for (i = 0; i < npage; i++)
unlocked += put_pfn(pfn++, prot);
if (do_accounting)
vfio_lock_acct(-unlocked);
return unlocked;
}
static int vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
dma_addr_t iova, size_t *size)
{
dma_addr_t start = iova, end = iova + *size;
long unlocked = 0;
while (iova < end) {
size_t unmapped;
phys_addr_t phys;
/*
* We use the IOMMU to track the physical address. This
* saves us from having a lot more entries in our mapping
* tree. The downside is that we don't track the size
* used to do the mapping. We request unmap of a single
* page, but expect IOMMUs that support large pages to
* unmap a larger chunk.
*/
phys = iommu_iova_to_phys(iommu->domain, iova);
if (WARN_ON(!phys)) {
iova += PAGE_SIZE;
continue;
}
unmapped = iommu_unmap(iommu->domain, iova, PAGE_SIZE);
if (!unmapped)
break;
unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
unmapped >> PAGE_SHIFT,
dma->prot, false);
iova += unmapped;
}
vfio_lock_acct(-unlocked);
*size = iova - start;
return 0;
}
static inline bool ranges_overlap(dma_addr_t start1, size_t size1,
dma_addr_t start2, size_t size2)
{
if (start1 < start2)
return (start2 - start1 < size1);
else if (start2 < start1)
return (start1 - start2 < size2);
return (size1 > 0 && size2 > 0);
}
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
dma_addr_t start, size_t size)
{
struct vfio_dma *dma;
list_for_each_entry(dma, &iommu->dma_list, next) {
if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage),
start, size))
return dma;
}
return NULL;
}
static long vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
size_t size, struct vfio_dma *dma)
static int vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
size_t *size, struct vfio_dma *dma)
{
size_t offset, overlap, tmp;
struct vfio_dma *split;
long npage_lo, npage_hi;
int ret;
/* Existing dma region is completely covered, unmap all */
if (start <= dma->iova &&
start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) {
vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot);
list_del(&dma->next);
npage_lo = dma->npage;
if (!*size)
return 0;
/*
* Existing dma region is completely covered, unmap all. This is
* the likely case since userspace tends to map and unmap buffers
* in one shot rather than multiple mappings within a buffer.
*/
if (likely(start <= dma->iova &&
start + *size >= dma->iova + dma->size)) {
*size = dma->size;
ret = vfio_unmap_unpin(iommu, dma, dma->iova, size);
if (ret)
return ret;
/*
* Did we remove more than we have? Should never happen
* since a vfio_dma is contiguous in iova and vaddr.
*/
WARN_ON(*size != dma->size);
vfio_remove_dma(iommu, dma);
kfree(dma);
return npage_lo;
return 0;
}
/* Overlap low address of existing range */
if (start <= dma->iova) {
size_t overlap;
overlap = start + *size - dma->iova;
ret = vfio_unmap_unpin(iommu, dma, dma->iova, &overlap);
if (ret)
return ret;
overlap = start + size - dma->iova;
npage_lo = overlap >> PAGE_SHIFT;
vfio_remove_dma(iommu, dma);
vfio_dma_unmap(iommu, dma->iova, npage_lo, dma->prot);
dma->iova += overlap;
dma->vaddr += overlap;
dma->npage -= npage_lo;
return npage_lo;
/*
* Check: we may have removed the whole vfio_dma. If not,
* fix up and re-insert.
*/
if (overlap < dma->size) {
dma->iova += overlap;
dma->vaddr += overlap;
dma->size -= overlap;
vfio_insert_dma(iommu, dma);
} else
kfree(dma);
*size = overlap;
return 0;
}
/* Overlap high address of existing range */
if (start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) {
size_t overlap;
if (start + *size >= dma->iova + dma->size) {
offset = start - dma->iova;
overlap = dma->size - offset;
overlap = dma->iova + NPAGE_TO_SIZE(dma->npage) - start;
npage_hi = overlap >> PAGE_SHIFT;
ret = vfio_unmap_unpin(iommu, dma, start, &overlap);
if (ret)
return ret;
vfio_dma_unmap(iommu, start, npage_hi, dma->prot);
dma->npage -= npage_hi;
return npage_hi;
dma->size -= overlap;
*size = overlap;
return 0;
}
/* Split existing */
npage_lo = (start - dma->iova) >> PAGE_SHIFT;
npage_hi = dma->npage - (size >> PAGE_SHIFT) - npage_lo;
split = kzalloc(sizeof *split, GFP_KERNEL);
/*
* Allocate our tracking structure early even though it may not
* be used. An allocation failure later loses track of pages and
* is more difficult to unwind.
*/
split = kzalloc(sizeof(*split), GFP_KERNEL);
if (!split)
return -ENOMEM;
vfio_dma_unmap(iommu, start, size >> PAGE_SHIFT, dma->prot);
offset = start - dma->iova;
dma->npage = npage_lo;
ret = vfio_unmap_unpin(iommu, dma, start, size);
if (ret || !*size) {
kfree(split);
return ret;
}
split->npage = npage_hi;
split->iova = start + size;
split->vaddr = dma->vaddr + NPAGE_TO_SIZE(npage_lo) + size;
split->prot = dma->prot;
list_add(&split->next, &iommu->dma_list);
return size >> PAGE_SHIFT;
tmp = dma->size;
/* Resize the lower vfio_dma in place, before the below insert */
dma->size = offset;
/* Insert new for remainder, assuming it didn't all get unmapped */
if (likely(offset + *size < tmp)) {
split->size = tmp - offset - *size;
split->iova = dma->iova + offset + *size;
split->vaddr = dma->vaddr + offset + *size;
split->prot = dma->prot;
vfio_insert_dma(iommu, split);
} else
kfree(split);
return 0;
}
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_unmap *unmap)
{
long ret = 0, npage = unmap->size >> PAGE_SHIFT;
struct vfio_dma *dma, *tmp;
uint64_t mask;
struct vfio_dma *dma;
size_t unmapped = 0, size;
int ret = 0;
mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
if (unmap->iova & mask)
return -EINVAL;
if (unmap->size & mask)
if (!unmap->size || unmap->size & mask)
return -EINVAL;
/* XXX We still break these down into PAGE_SIZE */
WARN_ON(mask & PAGE_MASK);
mutex_lock(&iommu->lock);
list_for_each_entry_safe(dma, tmp, &iommu->dma_list, next) {
if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage),
unmap->iova, unmap->size)) {
ret = vfio_remove_dma_overlap(iommu, unmap->iova,
unmap->size, dma);
if (ret > 0)
npage -= ret;
if (ret < 0 || npage == 0)
break;
}
while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
size = unmap->size;
ret = vfio_remove_dma_overlap(iommu, unmap->iova, &size, dma);
if (ret || !size)
break;
unmapped += size;
}
mutex_unlock(&iommu->lock);
return ret > 0 ? 0 : (int)ret;
/*
* We may unmap more than requested, update the unmap struct so
* userspace can know.
*/
unmap->size = unmapped;
return ret;
}
/*
* Turns out AMD IOMMU has a page table bug where it won't map large pages
* to a region that previously mapped smaller pages. This should be fixed
* soon, so this is just a temporary workaround to break mappings down into
* PAGE_SIZE. Better to map smaller pages than nothing.
*/
static int map_try_harder(struct vfio_iommu *iommu, dma_addr_t iova,
unsigned long pfn, long npage, int prot)
{
long i;
int ret;
for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
ret = iommu_map(iommu->domain, iova,
(phys_addr_t)pfn << PAGE_SHIFT,
PAGE_SIZE, prot);
if (ret)
break;
}
for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
iommu_unmap(iommu->domain, iova, PAGE_SIZE);
return ret;
}
static int vfio_dma_do_map(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_map *map)
{
struct vfio_dma *dma, *pdma = NULL;
dma_addr_t iova = map->iova;
unsigned long locked, lock_limit, vaddr = map->vaddr;
dma_addr_t end, iova;
unsigned long vaddr = map->vaddr;
size_t size = map->size;
long npage;
int ret = 0, prot = 0;
uint64_t mask;
long npage;
end = map->iova + map->size;
mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
@@ -430,104 +559,144 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
if (!prot)
return -EINVAL; /* No READ/WRITE? */
if (iommu->cache)
prot |= IOMMU_CACHE;
if (vaddr & mask)
return -EINVAL;
if (iova & mask)
if (map->iova & mask)
return -EINVAL;
if (size & mask)
if (!map->size || map->size & mask)
return -EINVAL;
/* XXX We still break these down into PAGE_SIZE */
WARN_ON(mask & PAGE_MASK);
/* Don't allow IOVA wrap */
if (iova + size && iova + size < iova)
if (end && end < map->iova)
return -EINVAL;
/* Don't allow virtual address wrap */
if (vaddr + size && vaddr + size < vaddr)
return -EINVAL;
npage = size >> PAGE_SHIFT;
if (!npage)
if (vaddr + map->size && vaddr + map->size < vaddr)
return -EINVAL;
mutex_lock(&iommu->lock);
if (vfio_find_dma(iommu, iova, size)) {
ret = -EBUSY;
goto out_lock;
if (vfio_find_dma(iommu, map->iova, map->size)) {
mutex_unlock(&iommu->lock);
return -EEXIST;
}
/* account for locked pages */
locked = current->mm->locked_vm + npage;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
__func__, rlimit(RLIMIT_MEMLOCK));
ret = -ENOMEM;
goto out_lock;
}
for (iova = map->iova; iova < end; iova += size, vaddr += size) {
struct vfio_dma *dma = NULL;
unsigned long pfn;
long i;
ret = __vfio_dma_map(iommu, iova, vaddr, npage, prot);
if (ret)
goto out_lock;
/* Check if we abut a region below - nothing below 0 */
if (iova) {
dma = vfio_find_dma(iommu, iova - 1, 1);
if (dma && dma->prot == prot &&
dma->vaddr + NPAGE_TO_SIZE(dma->npage) == vaddr) {
dma->npage += npage;
iova = dma->iova;
vaddr = dma->vaddr;
npage = dma->npage;
size = NPAGE_TO_SIZE(npage);
pdma = dma;
/* Pin a contiguous chunk of memory */
npage = vfio_pin_pages(vaddr, (end - iova) >> PAGE_SHIFT,
prot, &pfn);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
break;
}
}
/* Check if we abut a region above - nothing above ~0 + 1 */
if (iova + size) {
dma = vfio_find_dma(iommu, iova + size, 1);
if (dma && dma->prot == prot &&
dma->vaddr == vaddr + size) {
/* Verify pages are not already mapped */
for (i = 0; i < npage; i++) {
if (iommu_iova_to_phys(iommu->domain,
iova + (i << PAGE_SHIFT))) {
vfio_unpin_pages(pfn, npage, prot, true);
ret = -EBUSY;
break;
}
}
dma->npage += npage;
ret = iommu_map(iommu->domain, iova,
(phys_addr_t)pfn << PAGE_SHIFT,
npage << PAGE_SHIFT, prot);
if (ret) {
if (ret != -EBUSY ||
map_try_harder(iommu, iova, pfn, npage, prot)) {
vfio_unpin_pages(pfn, npage, prot, true);
break;
}
}
size = npage << PAGE_SHIFT;
/*
* Check if we abut a region below - nothing below 0.
* This is the most likely case when mapping chunks of
* physically contiguous regions within a virtual address
* range. Update the abutting entry in place since iova
* doesn't change.
*/
if (likely(iova)) {
struct vfio_dma *tmp;
tmp = vfio_find_dma(iommu, iova - 1, 1);
if (tmp && tmp->prot == prot &&
tmp->vaddr + tmp->size == vaddr) {
tmp->size += size;
iova = tmp->iova;
size = tmp->size;
vaddr = tmp->vaddr;
dma = tmp;
}
}
/*
* Check if we abut a region above - nothing above ~0 + 1.
* If we abut above and below, remove and free. If only
* abut above, remove, modify, reinsert.
*/
if (likely(iova + size)) {
struct vfio_dma *tmp;
tmp = vfio_find_dma(iommu, iova + size, 1);
if (tmp && tmp->prot == prot &&
tmp->vaddr == vaddr + size) {
vfio_remove_dma(iommu, tmp);
if (dma) {
dma->size += tmp->size;
kfree(tmp);
} else {
size += tmp->size;
tmp->size = size;
tmp->iova = iova;
tmp->vaddr = vaddr;
vfio_insert_dma(iommu, tmp);
dma = tmp;
}
}
}
if (!dma) {
dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma) {
iommu_unmap(iommu->domain, iova, size);
vfio_unpin_pages(pfn, npage, prot, true);
ret = -ENOMEM;
break;
}
dma->size = size;
dma->iova = iova;
dma->vaddr = vaddr;
/*
* If merged above and below, remove previously
* merged entry. New entry covers it.
*/
if (pdma) {
list_del(&pdma->next);
kfree(pdma);
}
pdma = dma;
dma->prot = prot;
vfio_insert_dma(iommu, dma);
}
}
/* Isolated, new region */
if (!pdma) {
dma = kzalloc(sizeof *dma, GFP_KERNEL);
if (!dma) {
ret = -ENOMEM;
vfio_dma_unmap(iommu, iova, npage, prot);
goto out_lock;
if (ret) {
struct vfio_dma *tmp;
iova = map->iova;
size = map->size;
while ((tmp = vfio_find_dma(iommu, iova, size))) {
int r = vfio_remove_dma_overlap(iommu, iova,
&size, tmp);
if (WARN_ON(r || !size))
break;
}
dma->npage = npage;
dma->iova = iova;
dma->vaddr = vaddr;
dma->prot = prot;
list_add(&dma->next, &iommu->dma_list);
}
out_lock:
mutex_unlock(&iommu->lock);
return ret;
}
@@ -606,7 +775,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&iommu->group_list);
INIT_LIST_HEAD(&iommu->dma_list);
iommu->dma_list = RB_ROOT;
mutex_init(&iommu->lock);
/*
@@ -640,7 +809,7 @@ static void vfio_iommu_type1_release(void *iommu_data)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_group *group, *group_tmp;
struct vfio_dma *dma, *dma_tmp;
struct rb_node *node;
list_for_each_entry_safe(group, group_tmp, &iommu->group_list, next) {
iommu_detach_group(iommu->domain, group->iommu_group);
@@ -648,10 +817,12 @@ static void vfio_iommu_type1_release(void *iommu_data)
kfree(group);
}
list_for_each_entry_safe(dma, dma_tmp, &iommu->dma_list, next) {
vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot);
list_del(&dma->next);
kfree(dma);
while ((node = rb_first(&iommu->dma_list))) {
struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
size_t size = dma->size;
vfio_remove_dma_overlap(iommu, dma->iova, &size, dma);
if (WARN_ON(!size))
break;
}
iommu_domain_free(iommu->domain);
@@ -706,6 +877,7 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
struct vfio_iommu_type1_dma_unmap unmap;
long ret;
minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
@@ -715,7 +887,11 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
if (unmap.argsz < minsz || unmap.flags)
return -EINVAL;
return vfio_dma_do_unmap(iommu, &unmap);
ret = vfio_dma_do_unmap(iommu, &unmap);
if (ret)
return ret;
return copy_to_user((void __user *)arg, &unmap, minsz);
}
return -ENOTTY;
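
The practical upshot of the pinning rework above: vfio_pin_pages() grabs runs of consecutive pfns, so a hugepage-backed buffer reaches iommu_map() as a single large mapping rather than page-at-a-time. A sketch under stated assumptions: map_one_hugepage() is a hypothetical helper, 'container' is an already-configured type1 container as in the documentation example, and the 2MB MAP_HUGETLB buffer is illustrative:

#define _GNU_SOURCE	/* for MAP_HUGETLB */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

int map_one_hugepage(int container)
{
	struct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(dma_map) };
	size_t sz = 2 * 1024 * 1024;	/* one 2MB hugepage, illustrative */
	void *buf = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (buf == MAP_FAILED)
		return -1;

	dma_map.vaddr = (unsigned long)buf;
	dma_map.iova  = 0;	/* illustrative IOVA */
	dma_map.size  = sz;
	dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;

	/*
	 * vfio_pin_pages() should find 512 consecutive pfns here, so
	 * type1 can call iommu_map() once with a 2MB chunk instead of
	 * 512 times with 4K pages.
	 */
	return ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);
}

Since disable_hugepages is declared S_IRUGO | S_IWUSR, the behavior can also be toggled at runtime through /sys/module/vfio_iommu_type1/parameters/disable_hugepages.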

include/uapi/linux/vfio.h

@@ -362,10 +362,14 @@ struct vfio_iommu_type1_dma_map {
#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
/**
* VFIO_IOMMU_UNMAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 14, struct vfio_dma_unmap)
* VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
* struct vfio_dma_unmap)
*
* Unmap IO virtual addresses using the provided struct vfio_dma_unmap.
* Caller sets argsz.
* Caller sets argsz. The actual unmapped size is returned in the size
* field. No guarantee is made to the user that arbitrary unmaps of iova
* or size different from those used in the original mapping call will
* succeed.
*/
struct vfio_iommu_type1_dma_unmap {
__u32 argsz;
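
Because VFIO_IOMMU_UNMAP_DMA is now _IOWR, callers should re-read size after the ioctl to learn how much was actually unmapped. A hypothetical helper under the same container assumption as above:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int unmap_range(int container, unsigned long iova, unsigned long size)
{
	struct vfio_iommu_type1_dma_unmap unmap = { .argsz = sizeof(unmap) };

	unmap.iova = iova;
	unmap.size = size;
	if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap))
		return -1;

	/*
	 * The kernel now writes back how much was actually unmapped,
	 * which may exceed the request when a hugepage mapping can
	 * only be torn down in larger chunks.
	 */
	printf("unmapped %llu bytes\n", (unsigned long long)unmap.size);
	return 0;
}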