iommu/vt-d: Replace Intel specific PASID allocator with IOASID

Make use of the generic IOASID code to manage PASID allocation, freeing,
and lookup, replacing the Intel-specific allocator.
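
For reference, the Intel-specific helpers removed below and their generic
replacements line up as in the sketch that follows. This is an illustrative
summary only (the example_ioasid_lifecycle() helper is hypothetical, not part
of the patch) and assumes the <linux/ioasid.h> interface of this kernel: a
NULL ioasid_set as used throughout the driver, an inclusive max bound, the
INVALID_IOASID failure sentinel, and ERR_PTR()/NULL returns from ioasid_find().

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/ioasid.h>

/* Hypothetical sketch of the allocate -> look up -> free lifecycle.
 * 'priv' stands in for whatever the caller wants ioasid_find() to hand
 * back later (the SVM code stores its struct intel_svm, the aux-domain
 * code stores NULL). */
static int example_ioasid_lifecycle(void *priv, ioasid_t min, ioasid_t max)
{
	ioasid_t pasid;
	void *found;

	pasid = ioasid_alloc(NULL, min, max, priv);	/* max is inclusive */
	if (pasid == INVALID_IOASID)
		return -ENOSPC;		/* the core reports no errno itself */

	/* ioasid_find() returns the stored pointer; for a PASID that was
	 * never allocated (or was already freed) it returns an ERR_PTR(). */
	found = ioasid_find(NULL, pasid, NULL);
	WARN_ON(found != priv);

	ioasid_free(pasid);
	return 0;
}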

Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Jacob Pan, 2020-01-02 08:18:08 +08:00, committed by Joerg Roedel
parent 39d630e332
commit 59a623374d
4 changed files with 30 additions and 56 deletions

drivers/iommu/Kconfig

@@ -214,6 +214,7 @@ config INTEL_IOMMU_SVM
 	select PCI_PASID
 	select PCI_PRI
 	select MMU_NOTIFIER
+	select IOASID
 	help
 	  Shared Virtual Memory (SVM) provides a facility for devices
 	  to access DMA resources through process address space by

drivers/iommu/intel-iommu.c

@@ -5282,7 +5282,7 @@ static void auxiliary_unlink_device(struct dmar_domain *domain,
 	domain->auxd_refcnt--;
 
 	if (!domain->auxd_refcnt && domain->default_pasid > 0)
-		intel_pasid_free_id(domain->default_pasid);
+		ioasid_free(domain->default_pasid);
 }
 
 static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -5300,10 +5300,11 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
 	if (domain->default_pasid <= 0) {
 		int pasid;
 
-		pasid = intel_pasid_alloc_id(domain, PASID_MIN,
-					     pci_max_pasids(to_pci_dev(dev)),
-					     GFP_KERNEL);
-		if (pasid <= 0) {
+		/* No private data needed for the default pasid */
+		pasid = ioasid_alloc(NULL, PASID_MIN,
+				     pci_max_pasids(to_pci_dev(dev)) - 1,
+				     NULL);
+		if (pasid == INVALID_IOASID) {
 			pr_err("Can't allocate default pasid\n");
 			return -ENODEV;
 		}
@@ -5339,7 +5340,7 @@ attach_failed:
 	spin_unlock(&iommu->lock);
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 	if (!domain->auxd_refcnt && domain->default_pasid > 0)
-		intel_pasid_free_id(domain->default_pasid);
+		ioasid_free(domain->default_pasid);
 
 	return ret;
 }
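
Note the two semantic shifts in the hunks above: ioasid_alloc() treats its max
argument as inclusive, so the upper bound becomes pci_max_pasids(to_pci_dev(dev)) - 1
where the old IDR wrapper took pci_max_pasids() as an exclusive end, and failure
is reported with the INVALID_IOASID sentinel rather than a value <= 0. A minimal
sketch of the same pattern (example_default_pasid() is hypothetical; PASID_MIN
comes from the driver's intel-pasid.h):

#include <linux/ioasid.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>

#include "intel-pasid.h"	/* PASID_MIN */

/* pci_max_pasids() returns how many PASIDs the device supports, so the
 * largest usable value is pci_max_pasids() - 1 once the bound is inclusive. */
static int example_default_pasid(struct pci_dev *pdev)
{
	ioasid_t pasid;

	/* No private data is attached to the default PASID, hence NULL. */
	pasid = ioasid_alloc(NULL, PASID_MIN,
			     pci_max_pasids(pdev) - 1, NULL);
	if (pasid == INVALID_IOASID)
		return -ENODEV;		/* the caller picks the errno */

	return pasid;
}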

drivers/iommu/intel-pasid.c

@@ -26,42 +26,6 @@
  */
 static DEFINE_SPINLOCK(pasid_lock);
 u32 intel_pasid_max_id = PASID_MAX;
-static DEFINE_IDR(pasid_idr);
-
-int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
-{
-	int ret, min, max;
-
-	min = max_t(int, start, PASID_MIN);
-	max = min_t(int, end, intel_pasid_max_id);
-
-	WARN_ON(in_interrupt());
-	idr_preload(gfp);
-	spin_lock(&pasid_lock);
-	ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
-	spin_unlock(&pasid_lock);
-	idr_preload_end();
-
-	return ret;
-}
-
-void intel_pasid_free_id(int pasid)
-{
-	spin_lock(&pasid_lock);
-	idr_remove(&pasid_idr, pasid);
-	spin_unlock(&pasid_lock);
-}
-
-void *intel_pasid_lookup_id(int pasid)
-{
-	void *p;
-
-	spin_lock(&pasid_lock);
-	p = idr_find(&pasid_idr, pasid);
-	spin_unlock(&pasid_lock);
-
-	return p;
-}
-
 /*
  * Per device pasid table management:
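
The block removed above was a driver-private IDR guarded by pasid_lock for
allocation, free, and lookup. The IOASID core keeps the PASID-to-private-data
mapping itself, and ioasid_find() runs its lookup under RCU, optionally calling
a getter on the private data before returning it. The Intel driver passes a
NULL getter, but a refcounted user could look like the hypothetical sketch
below (example_obj, example_get and example_lookup are illustrative, not from
this patch):

#include <linux/err.h>
#include <linux/ioasid.h>
#include <linux/kref.h>

struct example_obj {
	struct kref ref;
};

/* Called by ioasid_find() on the stored pointer; returning false makes
 * the lookup return NULL, e.g. when the object is already being freed. */
static bool example_get(void *priv)
{
	struct example_obj *obj = priv;

	return kref_get_unless_zero(&obj->ref);
}

static struct example_obj *example_lookup(ioasid_t pasid)
{
	struct example_obj *obj;

	obj = ioasid_find(NULL, pasid, example_get);
	if (IS_ERR_OR_NULL(obj))
		return NULL;	/* never allocated, or going away */

	return obj;		/* reference held; caller must drop it */
}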

drivers/iommu/intel-svm.c

@@ -17,6 +17,7 @@
 #include <linux/dmar.h>
 #include <linux/interrupt.h>
 #include <linux/mm_types.h>
+#include <linux/ioasid.h>
 #include <asm/page.h>
 
 #include "intel-pasid.h"
@@ -331,16 +332,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 		if (pasid_max > intel_pasid_max_id)
 			pasid_max = intel_pasid_max_id;
 
-		/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
-		ret = intel_pasid_alloc_id(svm,
-					   !!cap_caching_mode(iommu->cap),
-					   pasid_max, GFP_KERNEL);
-		if (ret < 0) {
+		/* Do not use PASID 0, reserved for RID to PASID */
+		svm->pasid = ioasid_alloc(NULL, PASID_MIN,
+					  pasid_max - 1, svm);
+		if (svm->pasid == INVALID_IOASID) {
 			kfree(svm);
 			kfree(sdev);
+			ret = -ENOSPC;
 			goto out;
 		}
-		svm->pasid = ret;
 		svm->notifier.ops = &intel_mmuops;
 		svm->mm = mm;
 		svm->flags = flags;
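
The comment change above is substantive: PASID 0 used to be skipped only under
caching mode (a virtualised IOMMU), but in scalable mode PASID 0 is the
RID-to-PASID value used for DMA that carries no PASID, so allocation now always
starts at PASID_MIN. For context, the relevant constants already defined in the
driver's intel-pasid.h (reproduced here, not part of the diff):

/* drivers/iommu/intel-pasid.h (existing definitions, shown for context) */
#define PASID_RID2PASID		0x0
#define PASID_MIN		0x1
#define PASID_MAX		0x100000

Also note that ioasid_alloc() does not return an errno, which is why the error
path now has to set ret = -ENOSPC explicitly before the goto.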
@@ -350,7 +350,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 		if (mm) {
 			ret = mmu_notifier_register(&svm->notifier, mm);
 			if (ret) {
-				intel_pasid_free_id(svm->pasid);
+				ioasid_free(svm->pasid);
 				kfree(svm);
 				kfree(sdev);
 				goto out;
@@ -366,7 +366,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 		if (ret) {
 			if (mm)
 				mmu_notifier_unregister(&svm->notifier, mm);
-			intel_pasid_free_id(svm->pasid);
+			ioasid_free(svm->pasid);
 			kfree(svm);
 			kfree(sdev);
 			goto out;
@@ -414,10 +414,15 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 	if (!iommu)
 		goto out;
 
-	svm = intel_pasid_lookup_id(pasid);
+	svm = ioasid_find(NULL, pasid, NULL);
 	if (!svm)
 		goto out;
 
+	if (IS_ERR(svm)) {
+		ret = PTR_ERR(svm);
+		goto out;
+	}
+
 	list_for_each_entry(sdev, &svm->devs, list) {
 		if (dev == sdev->dev) {
 			ret = 0;
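
Where intel_pasid_lookup_id() returned either a pointer or NULL, ioasid_find()
has several outcomes, which is why the lookup above now needs both a NULL check
and an IS_ERR() check (and the page-request path further down folds them into
IS_ERR_OR_NULL()). The helper below is a hypothetical way to collapse the triage
into an errno, assuming the return convention of the IOASID core in this kernel
(ERR_PTR(-ENOENT) for a PASID that is not allocated, ERR_PTR(-EACCES) for one
owned by a different ioasid_set, NULL only when a getter refuses the object or
NULL was stored as the private data):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ioasid.h>

/* Hypothetical: return 0 and the private data, or a negative errno. */
static int example_find_pasid(ioasid_t pasid, void **priv)
{
	void *p = ioasid_find(NULL, pasid, NULL);

	if (!p)
		return -ENOENT;
	if (IS_ERR(p))
		return PTR_ERR(p);

	*priv = p;
	return 0;
}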
@@ -436,7 +441,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 			kfree_rcu(sdev, rcu);
 
 			if (list_empty(&svm->devs)) {
-				intel_pasid_free_id(svm->pasid);
+				ioasid_free(svm->pasid);
 				if (svm->mm)
 					mmu_notifier_unregister(&svm->notifier, svm->mm);
@@ -471,10 +476,14 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid)
 	if (!iommu)
 		goto out;
 
-	svm = intel_pasid_lookup_id(pasid);
+	svm = ioasid_find(NULL, pasid, NULL);
 	if (!svm)
 		goto out;
 
+	if (IS_ERR(svm)) {
+		ret = PTR_ERR(svm);
+		goto out;
+	}
 	/* init_mm is used in this case */
 	if (!svm->mm)
 		ret = 1;
@@ -581,13 +590,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (!svm || svm->pasid != req->pasid) {
 			rcu_read_lock();
-			svm = intel_pasid_lookup_id(req->pasid);
+			svm = ioasid_find(NULL, req->pasid, NULL);
 			/* It *can't* go away, because the driver is not permitted
 			 * to unbind the mm while any page faults are outstanding.
 			 * So we only need RCU to protect the internal idr code. */
 			rcu_read_unlock();
-			if (!svm) {
+			if (IS_ERR_OR_NULL(svm)) {
 				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
 				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
 				       ((unsigned long long *)req)[1]);