1
0
Fork 0

iommu/vt-d: Fix ineffective devTLB invalidation for subdevices

iommu_flush_dev_iotlb() is called to invalidate caches on a device but
only loops over the devices which are fully-attached to the domain. For
sub-devices, this is ineffective and can result in invalid caching
entries left on the device.

Fix the missing invalidation by adding a loop over the subdevices and
ensuring that 'domain->has_iotlb_device' is updated when attaching to
subdevices.

Fixes: 67b8e02b5e ("iommu/vt-d: Aux-domain specific domain attach/detach")
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/1609949037-25291-4-git-send-email-yi.l.liu@intel.com
Signed-off-by: Will Deacon <will@kernel.org>
zero-sugar-mainline-defconfig
Liu Yi L 2021-01-07 00:03:57 +08:00 committed by Will Deacon
parent 18abda7a2d
commit 7c29ada5e7
1 changed file with 37 additions and 16 deletions

View File

@ -719,6 +719,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
	return nid;
}
static void domain_update_iotlb(struct dmar_domain *domain);
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
@ -744,6 +746,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
	else
		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
domain_update_iotlb(domain);
}

struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@ -1464,17 +1468,22 @@ static void domain_update_iotlb(struct dmar_domain *domain)
	assert_spin_locked(&device_domain_lock);

	list_for_each_entry(info, &domain->devices, link)
		if (info->ats_enabled) {
			has_iotlb_device = true;
			break;
		}

	if (!has_iotlb_device) {
		struct subdev_domain_info *sinfo;

		list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
			info = get_domain_info(sinfo->pdev);
			if (info && info->ats_enabled) {
				has_iotlb_device = true;
				break;
			}
		}
	}

	domain->has_iotlb_device = has_iotlb_device;
@ -1555,25 +1564,37 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
#endif
}
/*
 * Issue a queued-invalidation device-TLB flush for a single device.
 *
 * Silently does nothing when @info is NULL or ATS is not enabled on the
 * device, so callers may pass an unchecked lookup result (e.g. from
 * get_domain_info()) directly.
 */
static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
				    u64 addr, unsigned int mask)
{
	u16 sid, qdep;

	if (!info || !info->ats_enabled)
		return;

	/* Source-id is the requester's bus:devfn. */
	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
			   qdep, addr, mask);
}
/*
 * Invalidate the device TLBs of every device attached to @domain — both
 * the fully-attached devices on domain->devices and the aux-attached
 * subdevices on domain->subdevices — for the range described by
 * @addr/@mask.  Runs under device_domain_lock.
 */
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	unsigned long flags;
	struct device_domain_info *info;
	struct subdev_domain_info *sinfo;

	/* Fast path: no ATS-capable device anywhere in this domain. */
	if (!domain->has_iotlb_device)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		__iommu_flush_dev_iotlb(info, addr, mask);

	/* Subdevices are tracked separately; flush their parents too. */
	list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
		info = get_domain_info(sinfo->pdev);
		__iommu_flush_dev_iotlb(info, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}