/*
 * drivers/video/tegra/nvmap/nvmap_mru.c
 *
 * IOVMM virtualization support for nvmap
 *
 * Copyright (c) 2009-2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <asm/pgtable.h>

#include <mach/iovmm.h>

#include "nvmap.h"
#include "nvmap_mru.h"

/* If IOVMM reclamation is enabled (CONFIG_NVMAP_RECLAIM_UNPINNED_VM),
 * unpinned handles are placed onto a most-recently-used eviction list;
 * multiple lists are maintained, segmented by size (sizes were chosen to
 * roughly correspond with common sizes for graphics surfaces).
 *
 * If a handle is located on an MRU list, the code below may steal its
 * IOVMM area at any time to satisfy a pin operation when no free IOVMM
 * space is available.
 */

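/* size-bin cutoffs, in bytes: 256 KiB, 384 KiB, 768 KiB, 1 MiB and
 * 1.5 MiB; handles larger than the final cutoff fall into an extra
 * catch-all list (nvmap_mru_init allocates ARRAY_SIZE(mru_cutoff) + 1
 * lists for this reason) */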
static const size_t mru_cutoff[] = {
	262144, 393216, 786432, 1048576, 1572864
};

static inline struct list_head *mru_list(struct nvmap_share *share, size_t size)
{
	unsigned int i;

	BUG_ON(!share->mru_lists);
	for (i = 0; i < ARRAY_SIZE(mru_cutoff); i++)
		if (size <= mru_cutoff[i])
			break;

	return &share->mru_lists[i];
}

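/* returns three quarters of the client's total IOVM space
 * ((vm_size >> 2) * 3); presumably the remaining quarter is kept as
 * headroom outside MRU management */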
size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm)
{
	size_t vm_size = tegra_iovmm_get_vm_size(iovmm);
	return (vm_size >> 2) * 3;
}

/* the caller must hold the MRU lock (nvmap_mru_lock) before calling this */
void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h)
{
	size_t len = h->pgalloc.area->iovm_length;
	list_add(&h->pgalloc.mru_list, mru_list(share, len));
}

void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h)
{
	nvmap_mru_lock(s);
	if (!list_empty(&h->pgalloc.mru_list))
		list_del(&h->pgalloc.mru_list);
	nvmap_mru_unlock(s);
	INIT_LIST_HEAD(&h->pgalloc.mru_list);
}

/* Returns a tegra_iovmm_area for a handle. If the handle already has
 * an iovmm_area allocated, the handle is simply removed from its MRU list
 * and the existing iovmm_area is returned.
 *
 * If no allocation exists, try to allocate a new IOVMM area.
 *
 * If a new area cannot be allocated, try to re-use the most-recently-unpinned
 * handle's allocation.
 *
 * If that also fails, iteratively evict handles from the MRU lists and free
 * their allocations, until the new allocation succeeds.
 */
struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
					    struct nvmap_handle *h)
{
	struct list_head *mru;
	struct nvmap_handle *evict = NULL;
	struct tegra_iovmm_area *vm = NULL;
	unsigned int i, idx;
	pgprot_t prot;

	BUG_ON(!h || !c || !c->share);

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->pgalloc.area) {
		/* since this is only called with the pin lock held, and the
		 * handle's reference is taken before it is pinned, there are
		 * no races where h->pgalloc.area changes after the
		 * comparison */
		nvmap_mru_lock(c->share);
		BUG_ON(list_empty(&h->pgalloc.mru_list));
		list_del(&h->pgalloc.mru_list);
		INIT_LIST_HEAD(&h->pgalloc.mru_list);
		nvmap_mru_unlock(c->share);
		return h->pgalloc.area;
	}

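	/* no area allocated yet: first try a fresh IOVMM allocation */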
	vm = tegra_iovmm_create_vm(c->share->iovmm, NULL, h->size, prot);

	if (vm) {
		INIT_LIST_HEAD(&h->pgalloc.mru_list);
		return vm;
	}
	/* attempt to re-use the most recently unpinned IOVMM area in the
	 * same size bin as the current handle. If that fails, iteratively
	 * evict handles (starting from the current bin) until an allocation
	 * succeeds or no more areas can be evicted */

	nvmap_mru_lock(c->share);
	mru = mru_list(c->share, h->size);
	if (!list_empty(mru))
		evict = list_first_entry(mru, struct nvmap_handle,
					 pgalloc.mru_list);

	if (evict && evict->pgalloc.area->iovm_length >= h->size) {
		list_del(&evict->pgalloc.mru_list);
		vm = evict->pgalloc.area;
		evict->pgalloc.area = NULL;
		INIT_LIST_HEAD(&evict->pgalloc.mru_list);
		nvmap_mru_unlock(c->share);
		return vm;
	}

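	/* compute the index of the current size bin, then walk every bin,
	 * wrapping around to bin 0, evicting handles until the allocation
	 * succeeds or all bins are empty */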
	idx = mru - c->share->mru_lists;

	for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
		if (idx >= c->share->nr_mru)
			idx = 0;
		mru = &c->share->mru_lists[idx];
		while (!list_empty(mru) && !vm) {
			evict = list_first_entry(mru, struct nvmap_handle,
						 pgalloc.mru_list);

			BUG_ON(atomic_read(&evict->pin) != 0);
			BUG_ON(!evict->pgalloc.area);
			list_del(&evict->pgalloc.mru_list);
			INIT_LIST_HEAD(&evict->pgalloc.mru_list);
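			/* the MRU lock is a spinlock; drop it before calling
			 * tegra_iovmm_free_vm()/tegra_iovmm_create_vm(),
			 * which may sleep */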
			nvmap_mru_unlock(c->share);
			tegra_iovmm_free_vm(evict->pgalloc.area);
			evict->pgalloc.area = NULL;
			vm = tegra_iovmm_create_vm(c->share->iovmm,
						   NULL, h->size, prot);
			nvmap_mru_lock(c->share);
		}
	}
	nvmap_mru_unlock(c->share);
	return vm;
}

int nvmap_mru_init(struct nvmap_share *share)
{
	int i;
	spin_lock_init(&share->mru_lock);
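	/* one list per size cutoff, plus a final catch-all list for
	 * handles larger than the biggest cutoff */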
	share->nr_mru = ARRAY_SIZE(mru_cutoff) + 1;

	share->mru_lists = kcalloc(share->nr_mru, sizeof(*share->mru_lists),
				   GFP_KERNEL);

	if (!share->mru_lists)
		return -ENOMEM;

	for (i = 0; i < share->nr_mru; i++)
		INIT_LIST_HEAD(&share->mru_lists[i]);

	return 0;
}

void nvmap_mru_destroy(struct nvmap_share *share)
{
	/* kfree(NULL) is a no-op, so no need to test first */
	kfree(share->mru_lists);
	share->mru_lists = NULL;
}