-rw-r--r--  arch/arm/mach-tegra/include/mach/iovmm.h  |   3
-rw-r--r--  arch/arm/mach-tegra/iovmm.c               |  22
-rw-r--r--  drivers/video/tegra/nvmap/nvmap.c         | 133
-rw-r--r--  drivers/video/tegra/nvmap/nvmap.h         |   2
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_mru.c     |  15
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_mru.h     |   8
6 files changed, 114 insertions(+), 69 deletions(-)
diff --git a/arch/arm/mach-tegra/include/mach/iovmm.h b/arch/arm/mach-tegra/include/mach/iovmm.h
index 8f111605e065..e30d3dcf2859 100644
--- a/arch/arm/mach-tegra/include/mach/iovmm.h
+++ b/arch/arm/mach-tegra/include/mach/iovmm.h
@@ -166,6 +166,9 @@ void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm);
/* called by clients to return an iovmm_area to the free pool for the domain */
void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm);
+/* returns size of largest free iovm block */
+size_t tegra_iovmm_get_max_free(struct tegra_iovmm_client *client);
+
/* called by client software to map the page-aligned I/O address vaddr to
* a specific physical address pfn. I/O VMA should have been created with
* a NULL tegra_iovmm_area_ops structure. */
diff --git a/arch/arm/mach-tegra/iovmm.c b/arch/arm/mach-tegra/iovmm.c
index 1f9e49902188..1afdaf3b53cc 100644
--- a/arch/arm/mach-tegra/iovmm.c
+++ b/arch/arm/mach-tegra/iovmm.c
@@ -90,6 +90,28 @@ static tegra_iovmm_addr_t iovmm_align_down(struct tegra_iovmm_device *dev,
#define iovmprint(fmt, arg...) snprintf(page+len, count-len, fmt, ## arg)
+size_t tegra_iovmm_get_max_free(struct tegra_iovmm_client *client)
+{
+ struct rb_node *n;
+ struct tegra_iovmm_block *b;
+ struct tegra_iovmm_domain *domain = client->domain;
+ tegra_iovmm_addr_t max_free = 0;
+
+ spin_lock(&domain->block_lock);
+ n = rb_first(&domain->all_blocks);
+ while (n) {
+ b = rb_entry(n, struct tegra_iovmm_block, all_node);
+ n = rb_next(n);
+ if (test_bit(BK_free, &b->flags)) {
+ max_free = max_t(tegra_iovmm_addr_t,
+ max_free, iovmm_length(b));
+ }
+ }
+ spin_unlock(&domain->block_lock);
+ return max_free;
+}
+
+
static void tegra_iovmm_block_stats(struct tegra_iovmm_domain *domain,
unsigned int *num_blocks, unsigned int *num_free,
tegra_iovmm_addr_t *total, tegra_iovmm_addr_t *total_free,
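For context, tegra_iovmm_get_max_free() answers the question "is there still a
single free block large enough for an allocation of a given size in this
client's domain?". A minimal caller sketch follows; iovm_has_room() and its
size argument are illustrative only and are not part of this patch:

#include <linux/types.h>
#include <mach/iovmm.h>

/* Hypothetical helper: true if the client's domain still contains one free
 * block of at least 'size' bytes, i.e. a contiguous allocation of that size
 * could still be carved out of a single block. */
static bool iovm_has_room(struct tegra_iovmm_client *client, size_t size)
{
	/* Walks the domain's block tree under block_lock and returns the
	 * length of the largest free block. */
	return tegra_iovmm_get_max_free(client) >= size;
}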
diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c
index e8d795006082..f75fa3afa11e 100644
--- a/drivers/video/tegra/nvmap/nvmap.c
+++ b/drivers/video/tegra/nvmap/nvmap.c
@@ -69,12 +69,14 @@ static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
struct tegra_iovmm_area *area;
BUG_ON(!h->alloc);
+ nvmap_mru_lock(client->share);
if (atomic_inc_return(&h->pin) == 1) {
if (h->heap_pgalloc && !h->pgalloc.contig) {
- area = nvmap_handle_iovmm(client, h);
+ area = nvmap_handle_iovmm_locked(client, h);
if (!area) {
/* no race here, inside the pin mutex */
atomic_dec(&h->pin);
+ nvmap_mru_unlock(client->share);
return -ENOMEM;
}
if (area != h->pgalloc.area)
@@ -82,30 +84,16 @@ static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
h->pgalloc.area = area;
}
}
+ nvmap_mru_unlock(client->share);
return 0;
}
-static int wait_pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
-{
- int ret = 0;
-
- ret = pin_locked(client, h);
-
- if (ret) {
- ret = wait_event_interruptible(client->share->pin_wait,
- !pin_locked(client, h));
- }
-
- return ret ? -EINTR : 0;
-
-}
-
/* doesn't need to be called inside nvmap_pin_lock, since this will only
* expand the available VM area */
-static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
+static int handle_unpin(struct nvmap_client *client,
+ struct nvmap_handle *h, int free_vm)
{
int ret = 0;
-
nvmap_mru_lock(client->share);
if (atomic_read(&h->pin) == 0) {
@@ -125,17 +113,81 @@ static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
tegra_iovmm_zap_vm(h->pgalloc.area);
h->pgalloc.dirty = true;
}
- nvmap_mru_insert_locked(client->share, h);
+ if (free_vm) {
+ tegra_iovmm_free_vm(h->pgalloc.area);
+ h->pgalloc.area = NULL;
+ } else
+ nvmap_mru_insert_locked(client->share, h);
ret = 1;
}
}
nvmap_mru_unlock(client->share);
-
nvmap_handle_put(h);
return ret;
}
+static int pin_array_locked(struct nvmap_client *client,
+ struct nvmap_handle **h, int count)
+{
+ int pinned;
+ int i;
+ int err = 0;
+
+ for (pinned = 0; pinned < count; pinned++) {
+ err = pin_locked(client, h[pinned]);
+ if (err)
+ break;
+ }
+
+ if (err) {
+ /* unpin pinned handles */
+ for (i = 0; i < pinned; i++) {
+ /* inc ref counter, because
+ * handle_unpin decrements it */
+ nvmap_handle_get(h[i]);
+ /* unpin handles and free vm */
+ handle_unpin(client, h[i], true);
+ }
+ }
+
+ if (err && tegra_iovmm_get_max_free(client->share->iovmm) >=
+ client->iovm_limit) {
+ /* The first pin attempt may fail even though the iovmm
+ * has enough room, because of fragmentation caused by
+ * handles parked in the MRU areas. After such a failure
+ * the MRU has been drained and its iovm space freed.
+ *
+ * Retry the pinning here, since there may be no further
+ * pin_wait wakeups coming from unpin operations to
+ * trigger another attempt. */
+ for (pinned = 0; pinned < count; pinned++) {
+ err = pin_locked(client, h[pinned]);
+ if (err)
+ break;
+ }
+ if (err) {
+ pr_err("Pinning in an empty iovmm failed\n");
+ BUG();
+ }
+ }
+ return err;
+}
+
+static int wait_pin_array_locked(struct nvmap_client *client,
+ struct nvmap_handle **h, int count)
+{
+ int ret = 0;
+
+ ret = pin_array_locked(client, h, count);
+
+ if (ret) {
+ ret = wait_event_interruptible(client->share->pin_wait,
+ !pin_array_locked(client, h, count));
+ }
+ return ret ? -EINTR : 0;
+}
+
static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
{
struct nvmap_handle *h;
@@ -152,7 +204,7 @@ static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
current->group_leader->comm, h);
WARN_ON(1);
- w = handle_unpin(client, h);
+ w = handle_unpin(client, h, false);
nvmap_handle_put(h);
return w;
}
@@ -182,7 +234,7 @@ void nvmap_unpin_ids(struct nvmap_client *client,
"handle %08lx\n",
current->group_leader->comm, ids[i]);
} else {
- do_wake |= handle_unpin(client, h);
+ do_wake |= handle_unpin(client, h, false);
}
} else {
nvmap_ref_unlock(client);
@@ -205,7 +257,6 @@ int nvmap_pin_ids(struct nvmap_client *client,
unsigned int nr, const unsigned long *ids)
{
int ret = 0;
- int cnt = 0;
unsigned int i;
struct nvmap_handle **h = (struct nvmap_handle **)ids;
struct nvmap_handle_ref *ref;
@@ -249,20 +300,11 @@ int nvmap_pin_ids(struct nvmap_client *client,
if (WARN_ON(ret))
goto out;
- for (cnt = 0; cnt < nr && !ret; cnt++) {
- ret = wait_pin_locked(client, h[cnt]);
- }
+ ret = wait_pin_array_locked(client, h, nr);
+
mutex_unlock(&client->share->pin_lock);
if (ret) {
- int do_wake = 0;
-
- for (i = 0; i < cnt; i++)
- do_wake |= handle_unpin(client, h[i]);
-
- if (do_wake)
- wake_up(&client->share->pin_wait);
-
ret = -EINTR;
} else {
for (i = 0; i < nr; i++) {
@@ -287,7 +329,7 @@ out:
}
nvmap_ref_unlock(client);
- for (i = cnt; i < nr; i++)
+ for (i = 0; i < nr; i++)
nvmap_handle_put(h[i]);
}
@@ -487,7 +529,6 @@ int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
struct nvmap_handle **unique_arr)
{
int count = 0;
- int pinned = 0;
int ret = 0;
int i;
@@ -507,8 +548,7 @@ int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
for (i = 0; i < count; i++)
unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;
- for (pinned = 0; pinned < count && !ret; pinned++)
- ret = wait_pin_locked(client, unique_arr[pinned]);
+ ret = wait_pin_array_locked(client, unique_arr, count);
mutex_unlock(&client->share->pin_lock);
@@ -516,17 +556,8 @@ int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
ret = nvmap_reloc_pin_array(client, arr, nr, gather);
if (WARN_ON(ret)) {
- int do_wake = 0;
-
- for (i = pinned; i < count; i++)
+ for (i = 0; i < count; i++)
nvmap_handle_put(unique_arr[i]);
-
- for (i = 0; i < pinned; i++)
- do_wake |= handle_unpin(client, unique_arr[i]);
-
- if (do_wake)
- wake_up(&client->share->pin_wait);
-
return ret;
} else {
for (i = 0; i < count; i++) {
@@ -555,7 +586,7 @@ unsigned long nvmap_pin(struct nvmap_client *client,
if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
ret = -EINTR;
} else {
- ret = wait_pin_locked(client, h);
+ ret = wait_pin_array_locked(client, &h, 1);
mutex_unlock(&client->share->pin_lock);
}
@@ -590,7 +621,7 @@ unsigned long nvmap_handle_address(struct nvmap_client *c, unsigned long id)
void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
{
atomic_dec(&ref->pin);
- if (handle_unpin(client, ref->handle))
+ if (handle_unpin(client, ref->handle, false))
wake_up(&client->share->pin_wait);
}
@@ -603,7 +634,7 @@ void nvmap_unpin_handles(struct nvmap_client *client,
for (i = 0; i < nr; i++) {
if (WARN_ON(!h[i]))
continue;
- do_wake |= handle_unpin(client, h[i]);
+ do_wake |= handle_unpin(client, h[i], false);
}
if (do_wake)
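At the call-site level the public API is unchanged: a driver still pins a set
of handles, programs its DMA addresses, and unpins them. The sketch below is a
hypothetical driver-side usage, not part of this patch; map_buffers() is made
up, and the argument order of nvmap_unpin_ids() is assumed to mirror
nvmap_pin_ids():

/* Hypothetical consumer of the pin API touched above. */
static int map_buffers(struct nvmap_client *client,
		       const unsigned long *ids, unsigned int nr)
{
	int err;

	/* With this patch the pin is all-or-nothing: either every handle in
	 * 'ids' ends up pinned, or the call fails (e.g. -EINTR on a signal)
	 * with nothing left pinned behind. */
	err = nvmap_pin_ids(client, nr, ids);
	if (err)
		return err;

	/* ... program the hardware using nvmap_handle_address(client, ids[i]) ... */

	/* Unpinning may wake other clients sleeping on pin_wait. */
	nvmap_unpin_ids(client, nr, ids);
	return 0;
}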
diff --git a/drivers/video/tegra/nvmap/nvmap.h b/drivers/video/tegra/nvmap/nvmap.h
index 923ff8fc8d8a..48ea8debb80b 100644
--- a/drivers/video/tegra/nvmap/nvmap.h
+++ b/drivers/video/tegra/nvmap/nvmap.h
@@ -89,7 +89,7 @@ struct nvmap_share {
wait_queue_head_t pin_wait;
struct mutex pin_lock;
#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
- spinlock_t mru_lock;
+ struct mutex mru_lock;
struct list_head *mru_lists;
int nr_mru;
#endif
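The switch from a spinlock to a mutex matches the new locking scheme:
pin_locked() now takes mru_lock around the whole call to
nvmap_handle_iovmm_locked(), which may evict MRU entries and call
tegra_iovmm_free_vm()/tegra_iovmm_create_vm() without dropping the lock, and
those calls can presumably sleep. A sketch of the resulting calling convention
(get_area_locked() is illustrative; the real caller is pin_locked() in
nvmap.c):

/* Illustrative only: how a caller is expected to use the _locked variant. */
static struct tegra_iovmm_area *get_area_locked(struct nvmap_client *client,
						struct nvmap_handle *h)
{
	struct tegra_iovmm_area *area;

	nvmap_mru_lock(client->share);	/* a mutex now, so the callee may sleep */
	area = nvmap_handle_iovmm_locked(client, h);
	nvmap_mru_unlock(client->share);
	return area;
}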
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.c b/drivers/video/tegra/nvmap/nvmap_mru.c
index 252665427568..f38d0c390ca2 100644
--- a/drivers/video/tegra/nvmap/nvmap_mru.c
+++ b/drivers/video/tegra/nvmap/nvmap_mru.c
@@ -90,7 +90,7 @@ void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h)
* and if that fails, iteratively evict handles from the MRU lists and free
* their allocations, until the new allocation succeeds.
*/
-struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
struct nvmap_handle *h)
{
struct list_head *mru;
@@ -104,14 +104,9 @@ struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
prot = nvmap_pgprot(h, pgprot_kernel);
if (h->pgalloc.area) {
- /* since this is only called inside the pin lock, and the
- * handle is gotten before it is pinned, there are no races
- * where h->pgalloc.area is changed after the comparison */
- nvmap_mru_lock(c->share);
BUG_ON(list_empty(&h->pgalloc.mru_list));
list_del(&h->pgalloc.mru_list);
INIT_LIST_HEAD(&h->pgalloc.mru_list);
- nvmap_mru_unlock(c->share);
return h->pgalloc.area;
}
@@ -125,8 +120,6 @@ struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
* same size bin as the current handle. If that fails, iteratively
* evict handles (starting from the current bin) until an allocation
* succeeds or no more areas can be evicted */
-
- nvmap_mru_lock(c->share);
mru = mru_list(c->share, h->size);
if (!list_empty(mru))
evict = list_first_entry(mru, struct nvmap_handle,
@@ -137,7 +130,6 @@ struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
vm = evict->pgalloc.area;
evict->pgalloc.area = NULL;
INIT_LIST_HEAD(&evict->pgalloc.mru_list);
- nvmap_mru_unlock(c->share);
return vm;
}
@@ -155,22 +147,19 @@ struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
BUG_ON(!evict->pgalloc.area);
list_del(&evict->pgalloc.mru_list);
INIT_LIST_HEAD(&evict->pgalloc.mru_list);
- nvmap_mru_unlock(c->share);
tegra_iovmm_free_vm(evict->pgalloc.area);
evict->pgalloc.area = NULL;
vm = tegra_iovmm_create_vm(c->share->iovmm,
NULL, h->size, prot);
- nvmap_mru_lock(c->share);
}
}
- nvmap_mru_unlock(c->share);
return vm;
}
int nvmap_mru_init(struct nvmap_share *share)
{
int i;
- spin_lock_init(&share->mru_lock);
+ mutex_init(&share->mru_lock);
share->nr_mru = ARRAY_SIZE(mru_cutoff) + 1;
share->mru_lists = kzalloc(sizeof(struct list_head) * share->nr_mru,
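The eviction logic itself is unchanged, only the locking around it: the whole
allocate-or-evict path now runs with mru_lock held. A condensed sketch is
below; it omits the fast path that directly reuses an area from the same-size
bin, and pick_mru_victim() stands in for the per-bin MRU walk and is not a
real function:

/* Condensed sketch of nvmap_handle_iovmm_locked()'s allocation path after
 * this patch; mru_lock is held by the caller for the entire loop. */
vm = tegra_iovmm_create_vm(c->share->iovmm, NULL, h->size, prot);
while (!vm && (evict = pick_mru_victim(c->share, h->size))) {
	/* Drop the victim from its MRU list and release its iovm space. */
	list_del(&evict->pgalloc.mru_list);
	INIT_LIST_HEAD(&evict->pgalloc.mru_list);
	tegra_iovmm_free_vm(evict->pgalloc.area);
	evict->pgalloc.area = NULL;
	/* Retry with the space just freed. */
	vm = tegra_iovmm_create_vm(c->share->iovmm, NULL, h->size, prot);
}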
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.h b/drivers/video/tegra/nvmap/nvmap_mru.h
index bfc7fceae856..79ba8145a3af 100644
--- a/drivers/video/tegra/nvmap/nvmap_mru.h
+++ b/drivers/video/tegra/nvmap/nvmap_mru.h
@@ -35,12 +35,12 @@ struct tegra_iovmm_client;
static inline void nvmap_mru_lock(struct nvmap_share *share)
{
- spin_lock(&share->mru_lock);
+ mutex_lock(&share->mru_lock);
}
static inline void nvmap_mru_unlock(struct nvmap_share *share)
{
- spin_unlock(&share->mru_lock);
+ mutex_unlock(&share->mru_lock);
}
int nvmap_mru_init(struct nvmap_share *share);
@@ -53,7 +53,7 @@ void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h);
void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h);
-struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
struct nvmap_handle *h);
#else
@@ -72,7 +72,7 @@ static inline void nvmap_mru_remove(struct nvmap_share *s,
struct nvmap_handle *h)
{ }
-static inline struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+static inline struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
struct nvmap_handle *h)
{
BUG_ON(!h->pgalloc.area);