path: root/drivers/iommu/tegra-smmu.c
author     Russell King <rmk+kernel@arm.linux.org.uk>  2015-07-27 13:29:26 +0100
committer  Thierry Reding <treding@nvidia.com>         2015-08-13 16:06:39 +0200
commit     853520fa96511e4a49942d2cba34a329528c7e41 (patch)
tree       b34b8dce2abde0c9ef555f509148ffa8c5708da2 /drivers/iommu/tegra-smmu.c
parent     0b42c7c1132f331fba263f0d2ca23544770584b7 (diff)
iommu/tegra-smmu: Store struct page pointer for page tables
Store the struct page pointer for the second level page tables, rather than working back from the page directory entry. This is necessary as we want to eliminate the use of physical addresses used with arch-private functions, switching instead to use the streaming DMA API.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Thierry Reding <treding@nvidia.com>
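For context, the core of the change shown in the diff below is to stop decoding the page table's struct page out of the PDE value and instead keep the pointer in a per-address-space array. A minimal before/after sketch of the lookup, using the field names from the diff (the helper names pt_page_old()/pt_page_new() are illustrative only, not part of the driver):

/* Before: work back from the PDE value to the page table's struct page. */
static struct page *pt_page_old(struct tegra_smmu_as *as, unsigned int pde)
{
	u32 *pd = page_address(as->pd);

	if (!pd[pde])
		return NULL;

	/* Requires the PDE to hold a decodable physical frame number. */
	return pfn_to_page(pd[pde] & as->smmu->pfn_mask);
}

/* After: the struct page pointer is cached in the new as->pts[] array. */
static struct page *pt_page_new(struct tegra_smmu_as *as, unsigned int pde)
{
	return as->pts[pde];
}

Dropping the pfn_to_page() round trip frees the PDE contents from having to be CPU physical addresses, which is the prerequisite for the switch to the streaming DMA API mentioned above.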
Diffstat (limited to 'drivers/iommu/tegra-smmu.c')
-rw-r--r--  drivers/iommu/tegra-smmu.c  27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index bbff5b647183..8ec5ac45caab 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -41,6 +41,7 @@ struct tegra_smmu_as {
 	struct tegra_smmu *smmu;
 	unsigned int use_count;
 	struct page *count;
+	struct page **pts;
 	struct page *pd;
 	unsigned id;
 	u32 attr;
@@ -271,6 +272,14 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 		return NULL;
 	}
 
+	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
+	if (!as->pts) {
+		__free_page(as->count);
+		__free_page(as->pd);
+		kfree(as);
+		return NULL;
+	}
+
 	/* clear PDEs */
 	pd = page_address(as->pd);
 	SetPageReserved(as->pd);
@@ -487,14 +496,11 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
 {
 	unsigned int pd_index = iova_pd_index(iova);
 	struct page *pt_page;
-	u32 *pd;
 
-	pd = page_address(as->pd);
-
-	if (!pd[pd_index])
+	pt_page = as->pts[pd_index];
+	if (!pt_page)
 		return NULL;
 
-	pt_page = pfn_to_page(pd[pd_index] & as->smmu->pfn_mask);
 	*pagep = pt_page;
 
 	return tegra_smmu_pte_offset(pt_page, iova);
@@ -509,7 +515,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 	struct page *page;
 	unsigned int i;
 
-	if (pd[pde] == 0) {
+	if (!as->pts[pde]) {
 		page = alloc_page(GFP_KERNEL | __GFP_DMA);
 		if (!page)
 			return NULL;
@@ -520,6 +526,8 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 		for (i = 0; i < SMMU_NUM_PTE; i++)
 			pt[i] = 0;
 
+		as->pts[pde] = page;
+
 		smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
 
 		pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
@@ -529,7 +537,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 		smmu_flush_tlb_section(smmu, as->id, iova);
 		smmu_flush(smmu);
 	} else {
-		page = pfn_to_page(pd[pde] & smmu->pfn_mask);
+		page = as->pts[pde];
 	}
 
 	*pagep = page;
@@ -550,9 +558,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 	unsigned int pde = iova_pd_index(iova);
 	u32 *count = page_address(as->count);
 	u32 *pd = page_address(as->pd);
-	struct page *page;
-
-	page = pfn_to_page(pd[pde] & smmu->pfn_mask);
+	struct page *page = as->pts[pde];
 
 	/*
 	 * When no entries in this page table are used anymore, return the
@@ -573,6 +579,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 		/* Finally, free the page */
 		ClearPageReserved(page);
 		__free_page(page);
+		as->pts[pde] = NULL;
 	}
 }
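Taken together, the hunks give as->pts[] a simple lazily-allocate/lookup/free lifecycle. The same bookkeeping pattern, reduced to a self-contained user-space sketch (pt_cache, pt_get() and pt_put() are made-up names, and the entry counts merely stand in for SMMU_NUM_PDE/SMMU_NUM_PTE; the real driver additionally reserves pages, flushes caches and reference-counts PTE use, as shown above):

#include <stdlib.h>

#define NUM_PDE 1024			/* stands in for SMMU_NUM_PDE */
#define NUM_PTE 1024			/* stands in for SMMU_NUM_PTE */

struct pt_cache {
	unsigned int *pts[NUM_PDE];	/* stands in for struct page **pts */
};

/* Lazily allocate and cache the second-level table for @pde (cf. as_get_pte()). */
static unsigned int *pt_get(struct pt_cache *c, unsigned int pde)
{
	if (!c->pts[pde])
		c->pts[pde] = calloc(NUM_PTE, sizeof(unsigned int));

	return c->pts[pde];
}

/* Drop the table and clear the slot (cf. tegra_smmu_pte_put_use()). */
static void pt_put(struct pt_cache *c, unsigned int pde)
{
	free(c->pts[pde]);
	c->pts[pde] = NULL;
}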