author    Kirill Artamonov <kartamonov@nvidia.com>  2011-02-18 14:58:41 +0200
committer Varun Colbert <vcolbert@nvidia.com>       2011-02-18 18:47:21 -0800
commit    3ac244af4e6600be75f7431baf3418287142e4e1
tree      a8586a60107ca54a0414cdfc30704bccfc78577d /drivers/video/tegra/nvmap/nvmap_heap.c
parent    140b49082fd7d937d8a74b4dccc091c941dacb07
video: tegra: nvmap: fix potential deadlock
Enabled mutex debugging revealed potential deadlocks introduced with compaction.

The handle spinlock is replaced with a mutex: the heap functions cannot be protected by a spinlock because they call kernel slab allocation functions, which must not be called from atomic context.

The nvmap_client ref_lock is also replaced with a mutex; otherwise we could not access heap parameters protected by the (now mutex-based) nvmap_handle lock. The extra locking around handle->owner is removed.

bug 793364

Change-Id: I635ce9ebf259dd7bf8802457567f93b7be5795ea
Reviewed-on: http://git-master/r/19850
Reviewed-by: Kirill Artamonov <kartamonov@nvidia.com>
Tested-by: Kirill Artamonov <kartamonov@nvidia.com>
Reviewed-by: Daniel Willemsen <dwillemsen@nvidia.com>
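The constraint driving the change: slab allocations such as kmalloc(GFP_KERNEL) may sleep, so they are illegal inside a spinlocked (atomic) critical section but fine under a mutex. A minimal sketch of the pattern, using illustrative names (example_lock, example_alloc) that are not part of nvmap:

#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(example_lock);

static void *example_alloc(size_t size)
{
	void *p;

	mutex_lock(&example_lock);	/* may sleep; allowed, since a mutex is held, not a spinlock */
	p = kmalloc(size, GFP_KERNEL);	/* may sleep; legal under a mutex, a bug under a spinlock */
	mutex_unlock(&example_lock);

	return p;
}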
Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_heap.c')
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_heap.c | 18
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.c b/drivers/video/tegra/nvmap/nvmap_heap.c
index 0d7e1cd31bb8..c920048db82b 100644
--- a/drivers/video/tegra/nvmap/nvmap_heap.c
+++ b/drivers/video/tegra/nvmap/nvmap_heap.c
@@ -696,24 +696,22 @@ static struct nvmap_heap_block *do_heap_relocate_listblock(
size_t src_align = block->align;
unsigned int src_prot = block->mem_prot;
int error = 0;
+ struct nvmap_share *share;
if (!handle) {
pr_err("INVALID HANDLE!\n");
return NULL;
}
- spin_lock(&handle->lock);
+ mutex_lock(&handle->lock);
- if (!handle->owner) {
- spin_unlock(&handle->lock);
- return NULL;
- }
+ share = nvmap_get_share_from_dev(handle->dev);
/* TODO: It is possible to use only handle lock and no share
* pin_lock, but then we'll need to lock every handle during
* each pinning operation. Need to estimate performance impact
* if we decide to simplify locking this way. */
- mutex_lock(&handle->owner->share->pin_lock);
+ mutex_lock(&share->pin_lock);
/* abort if block is pinned */
if (atomic_read(&handle->pin))
@@ -755,8 +753,8 @@ static struct nvmap_heap_block *do_heap_relocate_listblock(
BUG_ON(error);
fail:
- mutex_unlock(&handle->owner->share->pin_lock);
- spin_unlock(&handle->lock);
+ mutex_unlock(&share->pin_lock);
+ mutex_unlock(&handle->lock);
return heap_block_new;
}
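The hunk above takes the two mutexes in a fixed order (handle->lock first, then the shared pin_lock) and releases them in reverse on the fail path; keeping that order identical on every path is what avoids an ABBA deadlock between concurrent relocations and pins. A minimal sketch of the nesting, with illustrative struct names that are not nvmap's types:

#include <linux/mutex.h>

struct example_share  { struct mutex pin_lock; };
struct example_handle { struct mutex lock; };

static void example_relocate(struct example_handle *h, struct example_share *s)
{
	mutex_lock(&h->lock);		/* per-handle state first */
	mutex_lock(&s->pin_lock);	/* then the shared pin bookkeeping */
	/* ... relocate the block unless the handle is pinned ... */
	mutex_unlock(&s->pin_lock);	/* release in reverse order */
	mutex_unlock(&h->lock);
}

The TODO comment in the hunk notes the alternative of dropping the shared pin_lock in favour of per-handle locking only, at the cost of taking every handle's lock during each pin operation.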
@@ -829,9 +827,9 @@ static void nvmap_heap_compact(struct nvmap_heap *heap,
void nvmap_usecount_inc(struct nvmap_handle *h)
{
if (h->alloc && !h->heap_pgalloc) {
- spin_lock(&h->lock);
+ mutex_lock(&h->lock);
h->usecount++;
- spin_unlock(&h->lock);
+ mutex_unlock(&h->lock);
} else {
h->usecount++;
}