summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2014-12-01 16:25:41 +0530
committerWinnie Hsu <whsu@nvidia.com>2015-01-19 15:53:03 -0800
commit495f8d31eff57319be13f885e88f4055fa56e772 (patch)
tree2eb0dd2ae9fd0d1746ff9425bad84c5afdb1a95b
parent058f6d9f3ae0927be0c9a525d5cdc660f0a16aa8 (diff)
video: tegra: host: gk20a: fix PDE update sequence
Current sequence:
- delete page tables memory
- update PDE entry and mark above page tables invalid

With this sequence, it is possible to have valid PDE entries with an
already freed page table, and this could lead us to invalid memory
accesses.

Fix this by switching the sequence as follows:
- update PDE entry and mark page tables invalid
- delete page tables memory

Bug 1577947

Change-Id: Icc3a8c74bbf1bf59e41e0322cfc279d15690aa9d
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/670323
(cherry-picked from commit 56f738b4c4ee188ec1f69b91615cd9728ff18cf0)
Reviewed-on: http://git-master/r/671196
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
-rw-r--r--drivers/gpu/nvgpu/gk20a/mm_gk20a.c10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 52c0f3c5978e..d8fa08ff4971 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1797,6 +1797,8 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
unmap_gmmu_pages(pte->ref, pte->sgt, pte_kv_cur);
if (pte->ref_cnt == 0) {
+ void *pte_ref_ptr = pte->ref;
+
/* It can make sense to keep around one page table for
* each flavor (empty)... in case a new map is coming
* right back to alloc (and fill it in) again.
@@ -1804,13 +1806,15 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
* unmap/map/unmap/map cases where we'd trigger pte
* free/alloc/free/alloc.
*/
- free_gmmu_pages(vm, pte->ref, pte->sgt,
- vm->mm->page_table_sizing[pgsz_idx].order,
- pte->size);
pte->ref = NULL;
/* rewrite pde */
update_gmmu_pde_locked(vm, pde_i);
+
+ free_gmmu_pages(vm, pte_ref_ptr, pte->sgt,
+ vm->mm->page_table_sizing[pgsz_idx].order,
+ pte->size);
+
}
}