From 7a6610139a1e1d9297dd1c5d178022eac36839cb Mon Sep 17 00:00:00 2001
From: Jan Kiszka
Date: Tue, 2 Nov 2010 08:05:51 +0100
Subject: intel-iommu: Fix use after release during device attach

Obtain the new pgd pointer before releasing the page containing this
value.

Cc: stable@kernel.org
Signed-off-by: Jan Kiszka
Reviewed-by: Sheng Yang
Signed-off-by: David Woodhouse
---
 drivers/pci/intel-iommu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 4789f8e8bf7a..35463ddf10a1 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3627,9 +3627,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 
 		pte = dmar_domain->pgd;
 		if (dma_pte_present(pte)) {
-			free_pgtable_page(dmar_domain->pgd);
 			dmar_domain->pgd = (struct dma_pte *)
 				phys_to_virt(dma_pte_addr(pte));
+			free_pgtable_page(pte);
 		}
 		dmar_domain->agaw--;
 	}
--
cgit v1.2.3


From a97590e56d0d58e1dd262353f7cbd84e81d8e600 Mon Sep 17 00:00:00 2001
From: Alex Williamson
Date: Fri, 4 Mar 2011 14:52:16 -0700
Subject: intel-iommu: Unlink domain from iommu

When we remove a device, we unlink the iommu from the domain, but we
never do the reverse unlinking of the domain from the iommu. This
means that we never clear iommu->domain_ids, eventually leading to
resource exhaustion if we repeatedly bind and unbind a device to a
driver. Also free empty domains to avoid a resource leak.

Signed-off-by: Alex Williamson
Acked-by: Donald Dutile
Signed-off-by: David Woodhouse
Cc: stable@kernel.org
---
 drivers/pci/intel-iommu.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 35463ddf10a1..292f2233295e 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3260,9 +3260,15 @@ static int device_notifier(struct notifier_block *nb,
 	if (!domain)
 		return 0;
 
-	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
+	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
 		domain_remove_one_dev_info(domain, pdev);
 
+		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+		    list_empty(&domain->devices))
+			domain_exit(domain);
+	}
+
 	return 0;
 }
 
@@ -3411,6 +3417,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 		domain->iommu_count--;
 		domain_update_iommu_cap(domain);
 		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+
+		spin_lock_irqsave(&iommu->lock, tmp_flags);
+		clear_bit(domain->id, iommu->domain_ids);
+		iommu->domains[domain->id] = NULL;
+		spin_unlock_irqrestore(&iommu->lock, tmp_flags);
 	}
 
 	spin_unlock_irqrestore(&device_domain_lock, flags);
--
cgit v1.2.3
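
The use-after-release fixed by the first patch above ("Fix use after
release during device attach") is purely an ordering problem: the old
top-level page-table page was freed before the new pgd pointer was read
out of it. The short standalone C sketch below mirrors that ordering
rule; struct level and pop_top_level() are invented illustration names,
not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for one page-table level; "lower" plays the role of the
 * address stored in the top-level entry (dma_pte_addr() in the patch). */
struct level {
	struct level *lower;
};

/* Drop the current top level and descend to the one below it. Mirrors
 * the fixed ordering: read the new top pointer first, then release the
 * page that contained it. */
static struct level *pop_top_level(struct level *top)
{
	struct level *new_top = top->lower;	/* read before free */

	free(top);				/* only now release it */
	return new_top;
}

int main(void)
{
	struct level *bottom = calloc(1, sizeof(*bottom));
	struct level *top = calloc(1, sizeof(*top));

	if (!bottom || !top)
		return 1;
	top->lower = bottom;
	/* Freeing "top" before reading top->lower would be the original bug. */
	top = pop_top_level(top);
	printf("new top level: %p\n", (void *)top);
	free(top);
	return 0;
}

Swapping the two marked lines in pop_top_level() reintroduces exactly
the read-after-free the patch removes.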
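
The domain_ids exhaustion fixed by "Unlink domain from iommu" is easy to
model in isolation: each IOMMU hands out domain IDs from a fixed-size
bitmap, and if the detach path never clears the bit, repeated bind/unbind
cycles drain the pool. A minimal sketch, assuming a toy 16-entry bitmap
and invented helpers (alloc_domain_id()/free_domain_id()) rather than the
kernel's bitmap API:

#include <stdio.h>

/* Toy model of iommu->domain_ids: a small fixed pool of IDs, one bit per
 * ID (the real pool size comes from the IOMMU capability register). */
#define NUM_DOMAIN_IDS 16

static unsigned domain_ids;		/* bit set = ID in use */

static int alloc_domain_id(void)
{
	for (int id = 0; id < NUM_DOMAIN_IDS; id++) {
		if (!(domain_ids & (1u << id))) {
			domain_ids |= 1u << id;
			return id;
		}
	}
	return -1;			/* pool exhausted */
}

static void free_domain_id(int id)
{
	domain_ids &= ~(1u << id);	/* the step the old code skipped */
}

int main(void)
{
	/* Repeated bind/unbind cycles: comment out free_domain_id() and
	 * the allocator runs dry after NUM_DOMAIN_IDS iterations. */
	for (int i = 0; i < 1000; i++) {
		int id = alloc_domain_id();

		if (id < 0) {
			printf("out of domain ids after %d cycles\n", i);
			return 1;
		}
		free_domain_id(id);
	}
	printf("1000 bind/unbind cycles completed\n");
	return 0;
}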
From 2fe9723df8e45fd247782adea244a5e653c30bf4 Mon Sep 17 00:00:00 2001
From: Alex Williamson
Date: Fri, 4 Mar 2011 14:52:30 -0700
Subject: intel-iommu: Fix get_domain_for_dev() error path

If we run out of domain_ids and fail iommu_attach_domain(), we fall
into domain_exit() without having set up enough of the domain
structure for this to do anything useful. In fact, it typically runs
off into the weeds walking the bogus domain->devices list. Just free
the domain.

Signed-off-by: Alex Williamson
Acked-by: Donald Dutile
Signed-off-by: David Woodhouse
Cc: stable@kernel.org
---
 drivers/pci/intel-iommu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 292f2233295e..5dc5d3e3508e 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1835,7 +1835,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 
 	ret = iommu_attach_domain(domain, iommu);
 	if (ret) {
-		domain_exit(domain);
+		free_domain_mem(domain);
 		goto error;
 	}
--
cgit v1.2.3
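
The error-path fix above is a specific case of a general rule: a full
destructor may only run on a fully constructed object. domain_exit()
walks state (the commit message points at the bogus domain->devices
list) that has not been set up yet when iommu_attach_domain() fails, so
the early-exit path should undo only the allocation itself. A hedged
sketch of that shape, using invented toy_* names in place of the kernel
structures:

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for struct dmar_domain; allocated with plain malloc(),
 * so its fields are indeterminate until the constructor fills them in. */
struct toy_domain {
	int id;
	int *dev_ids;		/* only valid once construction finishes */
	size_t ndevs;
};

/* Full teardown: walks dev_ids, so it must only see a finished domain. */
static void toy_domain_exit(struct toy_domain *d)
{
	for (size_t i = 0; i < d->ndevs; i++)
		printf("detaching dev %d\n", d->dev_ids[i]);
	free(d->dev_ids);
	free(d);
}

/* Constructor: on an early failure, undo only what has been done so far,
 * i.e. the allocation itself (the free_domain_mem() analogue). */
static struct toy_domain *toy_domain_create(int id, int fail_attach)
{
	struct toy_domain *d = malloc(sizeof(*d));

	if (!d)
		return NULL;
	d->id = id;

	if (fail_attach) {	/* models iommu_attach_domain() failing */
		free(d);	/* toy_domain_exit() here would read the
				 * still-uninitialized dev_ids/ndevs */
		return NULL;
	}

	d->ndevs = 2;
	d->dev_ids = calloc(d->ndevs, sizeof(*d->dev_ids));
	if (!d->dev_ids) {
		free(d);
		return NULL;
	}
	d->dev_ids[0] = 10;
	d->dev_ids[1] = 11;
	return d;
}

int main(void)
{
	struct toy_domain *d = toy_domain_create(1, 0);

	if (d)
		toy_domain_exit(d);
	if (!toy_domain_create(2, 1))
		printf("attach failed, partially built domain freed safely\n");
	return 0;
}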
From 51a63e67da6056c13b5b597dcc9e1b3bd7ceaa55 Mon Sep 17 00:00:00 2001
From: Joseph Cihula
Date: Mon, 21 Mar 2011 11:04:24 -0700
Subject: intel_iommu: disable all VT-d PMRs when TXT launched

Intel VT-d Protected Memory Regions (PMRs) are supposed to be disabled,
on each VT-d engine, after DMA remapping is enabled on the engines.
This is because the behavior of having both enabled is not
deterministic and because, if TXT has been used to launch the kernel,
the PMRs may be programmed to cover memory regions that will be used
for DMA.

Under some circumstances (certain quirks detected, lack of multiple
devices, etc.), the current code does not set up DMA remapping on some
VT-d engines. In such cases it also skips disabling the PMRs. This
causes failures when the kernel is launched with TXT (most often this
occurs on the graphics engine and results in colored vertical bars on
the display).

This patch detects when the kernel has been launched with TXT and then
disables the PMRs on all VT-d engines. In some cases remapping is not
enabled because of possible ACPI DMAR table errors; in those cases the
VT-d engine addresses may not be correct and thus cannot be safely
programmed, even to disable PMRs. Because part of the TXT launch
process is the verification of these addresses, it will always be safe
to disable PMRs if the TXT launch has succeeded, hence the PMRs are
only disabled in that case.

Signed-off-by: Joseph Cihula
Signed-off-by: David Woodhouse
---
 drivers/pci/intel-iommu.c | 38 +++++++++++++++++++++++++++++---------
 1 file changed, 29 insertions(+), 9 deletions(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 5dc5d3e3508e..cdded1e2e660 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1299,7 +1299,7 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 static struct iova_domain reserved_iova_list;
 static struct lock_class_key reserved_rbtree_key;
 
-static void dmar_init_reserved_ranges(void)
+static int dmar_init_reserved_ranges(void)
 {
 	struct pci_dev *pdev = NULL;
 	struct iova *iova;
@@ -1313,8 +1313,10 @@ static void dmar_init_reserved_ranges(void)
 	/* IOAPIC ranges shouldn't be accessed by DMA */
 	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
 		IOVA_PFN(IOAPIC_RANGE_END));
-	if (!iova)
+	if (!iova) {
 		printk(KERN_ERR "Reserve IOAPIC range failed\n");
+		return -ENODEV;
+	}
 
 	/* Reserve all PCI MMIO to avoid peer-to-peer access */
 	for_each_pci_dev(pdev) {
@@ -1327,11 +1329,13 @@ static void dmar_init_reserved_ranges(void)
 			iova = reserve_iova(&reserved_iova_list,
 					    IOVA_PFN(r->start),
 					    IOVA_PFN(r->end));
-			if (!iova)
+			if (!iova) {
 				printk(KERN_ERR "Reserve iova failed\n");
+				return -ENODEV;
+			}
 		}
 	}
-
+	return 0;
 }
 
 static void domain_reserve_special_ranges(struct dmar_domain *domain)
@@ -2213,7 +2217,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
 	return 0;
 }
 
-int __init init_dmars(void)
+static int __init init_dmars(int force_on)
 {
 	struct dmar_drhd_unit *drhd;
 	struct dmar_rmrr_unit *rmrr;
@@ -2393,8 +2397,15 @@ int __init init_dmars(void)
 	 * enable translation
 	 */
 	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
+		if (drhd->ignored) {
+			/*
+			 * we always have to disable PMRs or DMA may fail on
+			 * this device
+			 */
+			if (force_on)
+				iommu_disable_protect_mem_regions(drhd->iommu);
 			continue;
+		}
 		iommu = drhd->iommu;
 
 		iommu_flush_write_buffer(iommu);
@@ -3303,12 +3314,21 @@ int __init intel_iommu_init(void)
 	if (no_iommu || dmar_disabled)
 		return -ENODEV;
 
-	iommu_init_mempool();
-	dmar_init_reserved_ranges();
+	if (iommu_init_mempool()) {
+		if (force_on)
+			panic("tboot: Failed to initialize iommu memory\n");
+		return -ENODEV;
+	}
+
+	if (dmar_init_reserved_ranges()) {
+		if (force_on)
+			panic("tboot: Failed to reserve iommu ranges\n");
+		return -ENODEV;
+	}
 
 	init_no_remapping_devices();
 
-	ret = init_dmars();
+	ret = init_dmars(force_on);
 	if (ret) {
 		if (force_on)
 			panic("tboot: Failed to initialize DMARs\n");
--
cgit v1.2.3
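
The TXT/PMR patch above also changes the failure model of the early init
path: helpers that used to return void now report errors, and the caller
escalates to panic() only when force_on is set, i.e. when a measured
(tboot/TXT) launch makes silently continuing without DMA remapping
unsafe. The sketch below is a toy userspace model of that escalation
pattern with invented toy_* names; the panic strings are copied from the
patch only for recognizability.

#include <stdio.h>
#include <stdlib.h>

/* Toy model of the init_dmars()/intel_iommu_init() error handling added
 * above: helpers report failure instead of returning void, and the
 * caller only escalates to a hard stop when a TXT launch means carrying
 * on without DMA remapping would be unsafe. */

static int reserve_ranges_ok;		/* flip to 0 to simulate failure */

static int toy_init_mempool(void)
{
	return 0;			/* pretend allocation succeeded */
}

static int toy_init_reserved_ranges(void)
{
	return reserve_ranges_ok ? 0 : -1;
}

static void toy_panic(const char *msg)
{
	fprintf(stderr, "panic: %s", msg);
	exit(1);
}

static int toy_iommu_init(int force_on)
{
	if (toy_init_mempool()) {
		if (force_on)
			toy_panic("tboot: Failed to initialize iommu memory\n");
		return -1;
	}
	if (toy_init_reserved_ranges()) {
		if (force_on)
			toy_panic("tboot: Failed to reserve iommu ranges\n");
		return -1;	/* non-TXT boot: degrade gracefully */
	}
	return 0;
}

int main(void)
{
	reserve_ranges_ok = 1;
	printf("normal boot: %d\n", toy_iommu_init(0));

	reserve_ranges_ok = 0;
	printf("non-TXT boot, init failure: %d\n", toy_iommu_init(0));

	/* A TXT launch (force_on = 1) with the same failure would call
	 * toy_panic() instead of returning. */
	return 0;
}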