Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
 arch/x86/kernel/amd_iommu.c | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 0285521e0a99..7cd33f75a69c 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -540,11 +540,11 @@ static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
static void flush_devices_by_domain(struct protection_domain *domain)
{
struct amd_iommu *iommu;
- int i;
+ unsigned long i;
for (i = 0; i <= amd_iommu_last_bdf; ++i) {
if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
- (amd_iommu_pd_table[i] != domain))
+ (domain != NULL && amd_iommu_pd_table[i] != domain))
continue;
iommu = amd_iommu_rlookup_table[i];
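
With the old check, a NULL domain (which means "flush every attached device") tripped the second clause for every populated table entry, so the loop skipped everything and flushed nothing. A minimal standalone sketch of the corrected skip logic; skip_device() is a hypothetical helper and entry stands in for amd_iommu_pd_table[i]:

    #include <stdbool.h>
    #include <stddef.h>

    struct protection_domain;

    /* Corrected predicate: domain == NULL ("flush all") skips only
     * unattached entries; otherwise entries bound to a different
     * domain are skipped. */
    static bool skip_device(struct protection_domain *domain,
                            struct protection_domain *entry)
    {
        if (domain == NULL)
            return entry == NULL;
        return entry != domain;
    }
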
@@ -1230,9 +1230,10 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
/*
* If we run in passthrough mode the device must be assigned to the
- * passthrough domain if it is detached from any other domain
+ * passthrough domain if it is detached from any other domain.
+ * Make sure we can deassign from the pt_domain itself.
*/
- if (iommu_pass_through) {
+ if (iommu_pass_through && domain != pt_domain) {
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
__attach_device(iommu, pt_domain, devid);
}
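
Without the new domain != pt_domain guard, detaching a device from the passthrough domain itself would immediately re-attach it to that same domain, so it could never be deassigned. A userspace model of the guarded fallback (all names and types here are illustrative, not the driver's):

    #include <stdio.h>

    struct domain { const char *name; };

    static struct domain pt_domain = { "pt_domain" };
    static int iommu_pass_through = 1;

    /* Detach *slot; in passthrough mode fall back to pt_domain unless
     * pt_domain is exactly what we are detaching from. */
    static void detach(struct domain **slot)
    {
        struct domain *old = *slot;

        *slot = NULL;
        if (iommu_pass_through && old != &pt_domain)
            *slot = &pt_domain;
    }

    int main(void)
    {
        struct domain dma = { "dma_domain" };
        struct domain *dev = &dma;

        detach(&dev);   /* from dma_domain: falls back to pt_domain */
        printf("%s\n", dev ? dev->name : "(none)");
        detach(&dev);   /* from pt_domain: really detached now */
        printf("%s\n", dev ? dev->name : "(none)");
        return 0;
    }
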
@@ -1687,6 +1688,7 @@ static void __unmap_single(struct amd_iommu *iommu,
size_t size,
int dir)
{
+ dma_addr_t flush_addr;
dma_addr_t i, start;
unsigned int pages;
@@ -1694,6 +1696,7 @@ static void __unmap_single(struct amd_iommu *iommu,
(dma_addr + size > dma_dom->aperture_size))
return;
+ flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
@@ -1708,7 +1711,7 @@ static void __unmap_single(struct amd_iommu *iommu,
dma_ops_free_addresses(dma_dom, dma_addr, pages);
if (amd_iommu_unmap_flush || dma_dom->need_flush) {
- iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+ iommu_flush_pages(iommu, dma_dom->domain.id, flush_addr, size);
dma_dom->need_flush = false;
}
}
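
The flush used to be issued with the already page-masked address. Since the flush path computes its page count from the offset within the first page plus the size (the same arithmetic as iommu_num_pages(), which this function calls a few lines up), masking first can under-count by one page when an unaligned request crosses an extra page boundary. A standalone illustration of that case:

    #include <stdio.h>

    #define PAGE_SIZE 0x1000UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Same arithmetic as the kernel's iommu_num_pages() helper. */
    static unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
    {
        unsigned long span = (addr & (PAGE_SIZE - 1)) + len;
        return (span + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
        unsigned long dma_addr = 0x1ff0, size = 0x20;

        /* The unmasked address covers both touched pages: 2 */
        printf("flush(0x%lx): %lu page(s)\n",
               dma_addr, iommu_num_pages(dma_addr, size));
        /* The masked address under-counts: 1 */
        printf("flush(0x%lx): %lu page(s)\n",
               dma_addr & PAGE_MASK,
               iommu_num_pages(dma_addr & PAGE_MASK, size));
        return 0;
    }
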
@@ -2047,10 +2050,10 @@ static void prealloc_protection_domains(void)
struct pci_dev *dev = NULL;
struct dma_ops_domain *dma_dom;
struct amd_iommu *iommu;
- u16 devid;
+ u16 devid, __devid;
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- devid = calc_devid(dev->bus->number, dev->devfn);
+ __devid = devid = calc_devid(dev->bus->number, dev->devfn);
if (devid > amd_iommu_last_bdf)
continue;
devid = amd_iommu_alias_table[devid];
@@ -2065,6 +2068,10 @@ static void prealloc_protection_domains(void)
init_unity_mappings_for_device(dma_dom, devid);
dma_dom->target_dev = devid;
+ attach_device(iommu, &dma_dom->domain, devid);
+ if (__devid != devid)
+ attach_device(iommu, &dma_dom->domain, __devid);
+
list_add_tail(&dma_dom->list, &iommu_pd_list);
}
}
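
Because devid is rewritten through amd_iommu_alias_table[] a few lines earlier, the new attach_device() calls bind the preallocated domain to the alias and, when it differs, to the device's original requestor ID as well. A compact sketch of that pattern (alias_table and attach() are stand-ins for the driver's tables and helpers):

    #include <stdio.h>

    static unsigned short alias_table[65536];

    static void attach(unsigned short devid)
    {
        printf("attached devid 0x%04x\n", devid);
    }

    static void prealloc_one(unsigned short requestor)
    {
        unsigned short devid = alias_table[requestor];

        if (devid == 0)
            devid = requestor;      /* empty entry: identity alias */

        attach(devid);              /* the ID the IOMMU actually sees */
        if (requestor != devid)
            attach(requestor);      /* the device's own BDF, too */
    }

    int main(void)
    {
        alias_table[0x0810] = 0x0800;   /* 08:02.0 aliased to 08:00.0 */
        prealloc_one(0x0810);           /* attaches 0x0800 and 0x0810 */
        prealloc_one(0x0300);           /* identity: one attach */
        return 0;
    }
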
@@ -2079,6 +2086,11 @@ static struct dma_map_ops amd_iommu_dma_ops = {
.dma_supported = amd_iommu_dma_supported,
};
+void __init amd_iommu_init_api(void)
+{
+ register_iommu(&amd_iommu_ops);
+}
+
/*
* The function which clues the AMD IOMMU driver into dma_ops.
*/
@@ -2120,8 +2132,6 @@ int __init amd_iommu_init_dma_ops(void)
/* Make the driver finally visible to the drivers */
dma_ops = &amd_iommu_dma_ops;
- register_iommu(&amd_iommu_ops);
-
bus_register_notifier(&pci_bus_type, &device_nb);
amd_iommu_stats_init();
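
Moving register_iommu() out of amd_iommu_init_dma_ops() into the new amd_iommu_init_api() decouples IOMMU-API registration from dma_ops setup, presumably so the driver's init path can register the API even when the dma_ops path is skipped (e.g. in passthrough mode). A hypothetical sketch of the call flow this split enables; everything here except amd_iommu_init_api() is an assumption about the caller:

    int __init amd_iommu_init(void)
    {
        /* ... hardware and data-structure initialization ... */

        amd_iommu_init_api();               /* IOMMU API always registered */

        if (iommu_pass_through)
            amd_iommu_init_passthrough();   /* skips dma_ops entirely */
        else
            amd_iommu_init_dma_ops();

        return 0;
    }
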
@@ -2231,9 +2241,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
free_pagetable(domain);
- domain_id_free(domain->id);
-
- kfree(domain);
+ protection_domain_free(domain);
dom->priv = NULL;
}