author    Gary King <gking@nvidia.com>  2010-05-18 14:06:34 -0700
committer Gary King <gking@nvidia.com>  2010-05-18 14:12:26 -0700
commit    cf64382e46dd0c2148a0c251dc9d420bda4cdb8b (patch)
tree      3da23ef30705427304460befb23947cb9b7531b9 /drivers
parent    94af0b4ea79b5b633632c342fd7011b7c8cd88fd (diff)
nvmap: fix unpin/pin race condition with RECLAIM_UNPINNED_VM
_nvmap_handle_unpin needs to acquire the mru_vma_lock before decrementing the pin count, to ensure that the decrement to zero and the insertion on the MRU VMA list appear atomic with respect to a second client calling _nvmap_handle_pin on the same handle; otherwise, the two clients race, and the pin operation may trigger a BUG because the handle has a valid IOVMM area but is not located on any MRU VMA list.

Also, clean up some additional allocation-inside-spinlock issues: release the MRU VMA lock before calling tegra_iovmm_create_vm, and reacquire the lock after it returns.

Change-Id: I6703d21266124b3084f10f5b94a12cdeaf43a330
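For reference, a minimal sketch of the ordering this change establishes in the unpin path. This is a simplification, not the real drivers/char/nvmap.c: the struct definitions are reduced stand-ins, unpin_sketch is a hypothetical name, and the CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM guards are omitted for brevity; the lock, field, and helper names follow the patch below.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/atomic.h>

struct tegra_iovmm_area;			/* opaque in this sketch */

/* reduced stand-ins: only the fields this sketch touches */
struct nvmap_pgalloc {
	struct tegra_iovmm_area *area;
	struct list_head mru_list;
};

struct nvmap_handle {
	atomic_t pin;
	bool heap_pgalloc;
	struct nvmap_pgalloc pgalloc;
};

static DEFINE_SPINLOCK(nvmap_mru_vma_lock);

static void _nvmap_insert_mru_vma(struct nvmap_handle *h);	/* as in the patch */

/* the fixed ordering: take the MRU lock *before* the pin count can
 * reach zero, so "pin == 0" and "handle is on an MRU list" become a
 * single atomic transition as seen by a concurrent _nvmap_handle_pin */
static int unpin_sketch(struct nvmap_handle *h)
{
	int ret = 0;

	spin_lock(&nvmap_mru_vma_lock);
	if (!atomic_dec_return(&h->pin)) {
		if (h->heap_pgalloc && h->pgalloc.area) {
			/* insertion happens under the same lock as the
			 * decrement; a racing pin either sees pin > 0 or
			 * finds the handle already listed */
			_nvmap_insert_mru_vma(h);
			ret = 1;
		}
	}
	spin_unlock(&nvmap_mru_vma_lock);
	return ret;
}

Because the insertion happens under the same lock that guards the decrement, a concurrent pin can never observe a zero pin count on a handle with a valid IOVMM area that is absent from every MRU list, which is exactly the BUG the commit message describes.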
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/nvmap.c | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/drivers/char/nvmap.c b/drivers/char/nvmap.c
index f85dfc2bf7ef..e72e55aa0b23 100644
--- a/drivers/char/nvmap.c
+++ b/drivers/char/nvmap.c
@@ -890,12 +890,11 @@ static struct nvmap_handle *_nvmap_validate_get(unsigned long handle, bool su)
 #endif
 }
 
+/* nvmap_mru_vma_lock should be acquired by the caller before calling this */
 static inline void _nvmap_insert_mru_vma(struct nvmap_handle *h)
 {
 #ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
-	spin_lock(&nvmap_mru_vma_lock);
 	list_add(&h->pgalloc.mru_list, _nvmap_list(h->pgalloc.area->iovm_length));
-	spin_unlock(&nvmap_mru_vma_lock);
 #endif
 }
 
@@ -923,9 +922,8 @@ static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
 	struct tegra_iovmm_area *vm = NULL;
 	unsigned int i, idx;
 
-	spin_lock(&nvmap_mru_vma_lock);
-
 	if (h->pgalloc.area) {
+		spin_lock(&nvmap_mru_vma_lock);
 		BUG_ON(list_empty(&h->pgalloc.mru_list));
 		list_del(&h->pgalloc.mru_list);
 		INIT_LIST_HEAD(&h->pgalloc.mru_list);
@@ -938,7 +936,6 @@ static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
 
 	if (vm) {
 		INIT_LIST_HEAD(&h->pgalloc.mru_list);
-		spin_unlock(&nvmap_mru_vma_lock);
 		return vm;
 	}
 
@@ -946,6 +943,7 @@ static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
 	/* attempt to re-use the most recently unpinned IOVMM area in the
 	 * evict handles (starting from the current bin) until an allocation
 	 * succeeds or no more areas can be evicted */
+	spin_lock(&nvmap_mru_vma_lock);
 	mru = _nvmap_list(h->size);
 	if (!list_empty(mru))
 		evict = list_first_entry(mru, struct nvmap_handle,
@@ -973,14 +971,15 @@ static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
 			BUG_ON(!evict->pgalloc.area);
 			list_del(&evict->pgalloc.mru_list);
 			INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+			spin_unlock(&nvmap_mru_vma_lock);
 			tegra_iovmm_free_vm(evict->pgalloc.area);
 			evict->pgalloc.area = NULL;
 			vm = tegra_iovmm_create_vm(nvmap_vm_client,
 				NULL, h->size,
 				_nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
+			spin_lock(&nvmap_mru_vma_lock);
 		}
 	}
-
 	spin_unlock(&nvmap_mru_vma_lock);
 	return vm;
 #endif
@@ -1427,6 +1426,9 @@ static int _nvmap_handle_unpin(struct nvmap_handle *h)
 	}
 
 	BUG_ON(!h->alloc);
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+	spin_lock(&nvmap_mru_vma_lock);
+#endif
 	if (!atomic_dec_return(&h->pin)) {
 		if (h->heap_pgalloc && h->pgalloc.area) {
 			/* if a secure handle is clean (i.e., mapped into
@@ -1439,6 +1441,9 @@ static int _nvmap_handle_unpin(struct nvmap_handle *h)
 			ret=1;
 		}
 	}
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+	spin_unlock(&nvmap_mru_vma_lock);
+#endif
 	_nvmap_handle_put(h);
 	return ret;
 }
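As a footnote on the allocation-inside-spinlock cleanup: tegra_iovmm_create_vm can allocate memory, and therefore sleep, so it must not run with a spinlock held. Below is a hedged sketch of the drop/retake shape used in the _nvmap_get_vm hunks above. alloc_iovm_sketch is a hypothetical name and the extern declarations are abridged from the call sites in the patch, not taken from a header; only the lock/drop/retake structure is the point.

#include <linux/spinlock.h>
#include <asm/pgtable.h>

struct tegra_iovmm_client;
struct tegra_iovmm_area;

/* declarations abridged from the call sites in the patch above */
extern struct tegra_iovmm_client *nvmap_vm_client;
extern spinlock_t nvmap_mru_vma_lock;
extern struct tegra_iovmm_area *tegra_iovmm_create_vm(
	struct tegra_iovmm_client *client, void *ops,
	unsigned long size, pgprot_t prot);

/* hypothetical helper showing the drop/retake shape of the fix:
 * tegra_iovmm_create_vm() may allocate (and therefore sleep), so it
 * must not be called with the spinlock held */
static struct tegra_iovmm_area *alloc_iovm_sketch(unsigned long size,
						  pgprot_t prot)
{
	struct tegra_iovmm_area *vm;

	spin_lock(&nvmap_mru_vma_lock);
	/* ... select and unlink an eviction candidate under the lock ... */
	spin_unlock(&nvmap_mru_vma_lock);

	/* safe to block here: no spinlock held */
	vm = tegra_iovmm_create_vm(nvmap_vm_client, NULL, size, prot);

	spin_lock(&nvmap_mru_vma_lock);
	/* other clients may have touched the MRU lists while the lock was
	 * dropped: revalidate anything chosen earlier before reusing it */
	spin_unlock(&nvmap_mru_vma_lock);
	return vm;
}

In the patch itself the retaken lock protects the next iteration of the eviction loop, which is why the lock is reacquired immediately after the allocation attempt rather than only when a candidate must be re-examined.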