author     Gary King <gking@nvidia.com>           2010-08-31 14:04:37 -0700
committer  Dan Willemsen <dwillemsen@nvidia.com>  2011-11-30 21:36:01 -0800
commit     e1de26a63f64127ba8aee9f80e0a56b24e247cdf (patch)
tree       36013786e3a6e9771aa746eb86f0375539cd9e06
parent     87e7e9b170105c5d311f86a45223cc36bd24f3bb (diff)
video: tegra: add GPU memory management driver (nvmap)
nvmap provides an interface for user- and kernel-space clients to allocate and
access memory "handles" which can be pinned to enable the memory to be shared
with DMA devices on the system, and may also be mapped (using caller-specified
cache attributes) so that they are directly accessible by the CPU.

the memory handle object gives clients a common API to allocate from multiple
types of memory: platform-reserved physically contiguous "carveout" memory,
physically contiguous (order > 0) OS pages, or physically discontiguous order-0
OS pages that can be remapped into a contiguous region of the DMA device's
virtual address space through the tegra IOVMM subsystem. unpinned and unmapped
memory handles are relocatable at run-time by the nvmap system.

handles may also be shared between multiple clients, allowing (for example) a
window manager and its client applications to directly share framebuffers.

Change-Id: Ie8ead17fe7ab64f1c27d922b1b494f2487a478b6
Signed-off-by: Gary King <gking@nvidia.com>
-rw-r--r--  arch/arm/mach-tegra/include/mach/nvmap.h  |  116
-rw-r--r--  drivers/video/tegra/Kconfig               |   25
-rw-r--r--  drivers/video/tegra/Makefile              |    1
-rw-r--r--  drivers/video/tegra/nvmap/Makefile        |    6
-rw-r--r--  drivers/video/tegra/nvmap/nvmap.c         |  724
-rw-r--r--  drivers/video/tegra/nvmap/nvmap.h         |  218
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_dev.c     |  917
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_handle.c  |  457
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_heap.c    |  807
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_heap.h    |   62
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_ioctl.c   |  629
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_ioctl.h   |  159
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_mru.c     |  194
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_mru.h     |   84
14 files changed, 4399 insertions(+), 0 deletions(-)
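As an orientation aid (not part of the original patch), the kernel-space interface declared in arch/arm/mach-tegra/include/mach/nvmap.h below can be exercised roughly as in the following minimal sketch; the buffer size, alignment and cache flag are illustrative only and error handling is abbreviated.

/* sketch: allocate, map and pin a buffer with the nvmap kernel API */
#include <linux/err.h>
#include <linux/mm.h>
#include <mach/nvmap.h>

static int nvmap_usage_sketch(void)
{
        struct nvmap_client *client;
        struct nvmap_handle_ref *ref;
        unsigned long dma_addr;
        void *cpu_va;

        client = nvmap_create_client(nvmap_dev);
        if (!client)
                return -ENOMEM;

        /* 64 KiB, page-aligned, write-combined; allocated from the
         * default heaps (sysmem or the generic carveout) */
        ref = nvmap_alloc(client, 64 * 1024, PAGE_SIZE,
                          NVMAP_HANDLE_WRITE_COMBINE);
        if (IS_ERR(ref)) {
                nvmap_client_put(client);
                return PTR_ERR(ref);
        }

        cpu_va = nvmap_mmap(ref);               /* CPU mapping */
        dma_addr = nvmap_pin(client, ref);      /* device-visible address */

        /* ... fill cpu_va, hand dma_addr to the DMA engine ... */

        nvmap_unpin(client, ref);
        nvmap_munmap(ref, cpu_va);
        nvmap_free(client, ref);
        nvmap_client_put(client);
        return 0;
}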
diff --git a/arch/arm/mach-tegra/include/mach/nvmap.h b/arch/arm/mach-tegra/include/mach/nvmap.h
new file mode 100644
index 000000000000..41f06f532c39
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/nvmap.h
@@ -0,0 +1,116 @@
+/*
+ * arch/arm/mach-tegra/include/mach/nvmap.h
+ *
+ * structure declarations for nvmem and nvmap user-space ioctls
+ *
+ * Copyright (c) 2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/ioctl.h>
+#include <linux/file.h>
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#ifndef __NVMAP_H
+#define __NVMAP_H
+
+#define NVMAP_HEAP_SYSMEM (1ul<<31)
+#define NVMAP_HEAP_IOVMM (1ul<<30)
+
+/* common carveout heaps */
+#define NVMAP_HEAP_CARVEOUT_IRAM (1ul<<29)
+#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
+
+#define NVMAP_HEAP_CARVEOUT_MASK (NVMAP_HEAP_IOVMM - 1)
+
+/* allocation flags */
+#define NVMAP_HANDLE_UNCACHEABLE (0x0ul << 0)
+#define NVMAP_HANDLE_WRITE_COMBINE (0x1ul << 0)
+#define NVMAP_HANDLE_INNER_CACHEABLE (0x2ul << 0)
+#define NVMAP_HANDLE_CACHEABLE (0x3ul << 0)
+#define NVMAP_HANDLE_CACHE_FLAG (0x3ul << 0)
+
+#define NVMAP_HANDLE_SECURE (0x1ul << 2)
+
+
+#if defined(__KERNEL__)
+
+struct nvmap_handle_ref;
+struct nvmap_handle;
+struct nvmap_client;
+struct nvmap_device;
+
+#define nvmap_ref_to_handle(_ref) (*(struct nvmap_handle **)(_ref))
+#define nvmap_id_to_handle(_id) ((struct nvmap_handle *)(_id))
+
+struct nvmap_pinarray_elem {
+ __u32 patch_mem;
+ __u32 patch_offset;
+ __u32 pin_mem;
+ __u32 pin_offset;
+};
+
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev);
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+ size_t align, unsigned int flags);
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r);
+
+void *nvmap_mmap(struct nvmap_handle_ref *r);
+
+void nvmap_munmap(struct nvmap_handle_ref *r, void *addr);
+
+struct nvmap_client *nvmap_client_get_file(int fd);
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client);
+
+void nvmap_client_put(struct nvmap_client *c);
+
+unsigned long nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r);
+
+unsigned long nvmap_handle_address(struct nvmap_client *c, unsigned long id);
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r);
+
+int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
+ const struct nvmap_pinarray_elem *arr, int nr,
+ struct nvmap_handle **unique);
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+ struct nvmap_handle **h, int nr);
+
+struct nvmap_platform_carveout {
+ const char *name;
+ unsigned int usage_mask;
+ unsigned long base;
+ size_t size;
+ size_t buddy_size;
+};
+
+struct nvmap_platform_data {
+ const struct nvmap_platform_carveout *carveouts;
+ unsigned int nr_carveouts;
+};
+
+extern struct nvmap_device *nvmap_dev;
+
+#endif
+
+#endif
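The nvmap_platform_carveout and nvmap_platform_data structures above are handed to the driver as platform data for the "tegra-nvmap" platform device (see nvmap_probe further below). A minimal board-file sketch, with a made-up carveout name, base and size, might look like this:

/* board-file sketch (illustrative values only, not from a real board) */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <mach/nvmap.h>

static struct nvmap_platform_carveout board_carveouts[] = {
        {
                .name       = "generic-0",              /* hypothetical */
                .usage_mask = NVMAP_HEAP_CARVEOUT_GENERIC,
                .base       = 0x18c00000,               /* hypothetical */
                .size       = 32 * 1024 * 1024,         /* hypothetical 32 MiB */
                .buddy_size = 32 * 1024,
        },
};

static struct nvmap_platform_data board_nvmap_data = {
        .carveouts    = board_carveouts,
        .nr_carveouts = ARRAY_SIZE(board_carveouts),
};

static struct platform_device board_nvmap_device = {
        .name = "tegra-nvmap",
        .id   = -1,
        .dev  = {
                .platform_data = &board_nvmap_data,
        },
};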
diff --git a/drivers/video/tegra/Kconfig b/drivers/video/tegra/Kconfig
index ee3bf59d88c1..f9192c6d68b8 100644
--- a/drivers/video/tegra/Kconfig
+++ b/drivers/video/tegra/Kconfig
@@ -1,3 +1,7 @@
+if ARCH_TEGRA
+
+comment "NVIDIA Tegra Display Driver options"
+
config TEGRA_GRHOST
tristate "Tegra graphics host driver"
depends on TEGRA_IOVMM
@@ -22,3 +26,24 @@ config FB_TEGRA
default FB
help
Framebuffer device support for the Tegra display controller.
+
+config TEGRA_NVMAP
+ bool "Tegra GPU memory management driver"
+ select ARM_ATTRIB_ALLOCATOR
+ default y
+ help
+ Say Y here to include the memory management driver for the Tegra
+ GPU, multimedia and display subsystems.
+
+config NVMAP_RECLAIM_UNPINNED_VM
+ bool "Allow /dev/nvmap to reclaim unpinned I/O virtual memory"
+ depends on TEGRA_NVMAP && TEGRA_IOVMM
+ default y
+ help
+ Say Y here to enable /dev/nvmap to reclaim I/O virtual memory after
+ it has been unpinned, and re-use it for other objects. This can
+ allow a larger I/O virtual memory space than would normally be
+ supported by the hardware, at a slight cost in performance.
+
+endif
+
diff --git a/drivers/video/tegra/Makefile b/drivers/video/tegra/Makefile
index 177ec484c10a..ef9e709303df 100644
--- a/drivers/video/tegra/Makefile
+++ b/drivers/video/tegra/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_TEGRA_GRHOST) += host/
obj-$(CONFIG_TEGRA_DC) += dc/
obj-$(CONFIG_FB_TEGRA) += fb.o
+obj-$(CONFIG_TEGRA_NVMAP) += nvmap/
diff --git a/drivers/video/tegra/nvmap/Makefile b/drivers/video/tegra/nvmap/Makefile
new file mode 100644
index 000000000000..59449abc3edc
--- /dev/null
+++ b/drivers/video/tegra/nvmap/Makefile
@@ -0,0 +1,6 @@
+obj-y += nvmap.o
+obj-y += nvmap_dev.o
+obj-y += nvmap_handle.o
+obj-y += nvmap_heap.o
+obj-y += nvmap_ioctl.o
+obj-${CONFIG_NVMAP_RECLAIM_UNPINNED_VM} += nvmap_mru.o
\ No newline at end of file
diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c
new file mode 100644
index 000000000000..7419731a7c52
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap.c
@@ -0,0 +1,724 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap.c
+ *
+ * Memory manager for Tegra GPU
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/rbtree.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* private nvmap_handle flag for pinning duplicate detection */
+#define NVMAP_HANDLE_VISITED (0x1ul << 31)
+
+/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
+static void map_iovmm_area(struct nvmap_handle *h)
+{
+ tegra_iovmm_addr_t va;
+ unsigned long i;
+
+ BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
+ BUG_ON(h->size & ~PAGE_MASK);
+ WARN_ON(!h->pgalloc.dirty);
+
+ for (va = h->pgalloc.area->iovm_start, i = 0;
+ va < (h->pgalloc.area->iovm_start + h->size);
+ i++, va += PAGE_SIZE) {
+ BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
+ tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va,
+ page_to_pfn(h->pgalloc.pages[i]));
+ }
+ h->pgalloc.dirty = false;
+}
+
+/* must be called inside nvmap_pin_lock, to ensure that an entire stream
+ * of pins will complete without racing with a second stream. handle should
+ * have nvmap_handle_get (or nvmap_validate_get) called before calling
+ * this function. */
+static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
+{
+ struct tegra_iovmm_area *area;
+ BUG_ON(!h->alloc);
+
+ if (atomic_inc_return(&h->pin) == 1) {
+ if (h->heap_pgalloc && !h->pgalloc.contig) {
+ area = nvmap_handle_iovmm(client, h);
+ if (!area) {
+ /* no race here, inside the pin mutex */
+ atomic_dec(&h->pin);
+ return -ENOMEM;
+ }
+ if (area != h->pgalloc.area)
+ h->pgalloc.dirty = true;
+ h->pgalloc.area = area;
+ }
+ }
+ return 0;
+}
+
+static int wait_pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
+{
+ int ret = 0;
+
+ ret = pin_locked(client, h);
+
+ if (ret) {
+ ret = wait_event_interruptible(client->share->pin_wait,
+ !pin_locked(client, h));
+ }
+
+ return ret ? -EINTR : 0;
+
+}
+
+/* doesn't need to be called inside nvmap_pin_lock, since this will only
+ * expand the available VM area */
+static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
+{
+ int ret = 0;
+
+ nvmap_mru_lock(client->share);
+
+ if (atomic_read(&h->pin) == 0) {
+ nvmap_err(client, "%s unpinning unpinned handle %p\n",
+ current->group_leader->comm, h);
+ nvmap_mru_unlock(client->share);
+ return 0;
+ }
+
+ BUG_ON(!h->alloc);
+
+ if (!atomic_dec_return(&h->pin)) {
+ if (h->heap_pgalloc && h->pgalloc.area) {
+ /* if a secure handle is clean (i.e., mapped into
+ * IOVMM), it needs to be zapped on unpin. */
+ if (h->secure && !h->pgalloc.dirty) {
+ tegra_iovmm_zap_vm(h->pgalloc.area);
+ h->pgalloc.dirty = true;
+ }
+ nvmap_mru_insert_locked(client->share, h);
+ ret = 1;
+ }
+ }
+
+ nvmap_mru_unlock(client->share);
+
+ nvmap_handle_put(h);
+ return ret;
+}
+
+static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
+{
+ struct nvmap_handle *h;
+ int w;
+
+ h = nvmap_validate_get(client, id);
+ if (unlikely(!h)) {
+ nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
+ current->group_leader->comm, (void *)id);
+ return 0;
+ }
+
+ nvmap_err(client, "%s unpinning unreferenced handle %p\n",
+ current->group_leader->comm, h);
+ WARN_ON(1);
+
+ w = handle_unpin(client, h);
+ nvmap_handle_put(h);
+ return w;
+}
+
+void nvmap_unpin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids)
+{
+ unsigned int i;
+ int do_wake = 0;
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle_ref *ref;
+
+ if (!ids[i])
+ continue;
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (ref) {
+ struct nvmap_handle *h = ref->handle;
+ int e = atomic_add_unless(&ref->pin, -1, 0);
+
+ nvmap_ref_unlock(client);
+
+ if (!e) {
+ nvmap_err(client, "%s unpinning unpinned "
+ "handle %08lx\n",
+ current->group_leader->comm, ids[i]);
+ } else {
+ do_wake |= handle_unpin(client, h);
+ }
+ } else {
+ nvmap_ref_unlock(client);
+ if (client->super)
+ do_wake |= handle_unpin_noref(client, ids[i]);
+ else
+ nvmap_err(client, "%s unpinning invalid "
+ "handle %08lx\n",
+ current->group_leader->comm, ids[i]);
+ }
+ }
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+}
+
+/* pins a list of handles by id; same conditions apply as to
+ * wait_pin_locked, but this also bumps the pin count of each handle_ref. */
+int nvmap_pin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids)
+{
+ int ret = 0;
+ int cnt = 0;
+ unsigned int i;
+ struct nvmap_handle **h = (struct nvmap_handle **)ids;
+ struct nvmap_handle_ref *ref;
+
+ /* to optimize for the common case (client provided valid handle
+ * references and the pin succeeds), increment the handle_ref pin
+ * count during validation. in error cases, the tree will need to
+ * be re-walked, since the handle_ref is discarded so that an
+ * allocation isn't required. if a handle_ref is not found,
+ * locally validate that the caller has permission to pin the handle;
+ * handle_refs are not created in this case, so it is possible that
+ * if the caller crashes after pinning a global handle, the handle
+ * will be permanently leaked. */
+ nvmap_ref_lock(client);
+ for (i = 0; i < nr && !ret; i++) {
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (ref) {
+ atomic_inc(&ref->pin);
+ nvmap_handle_get(h[i]);
+ } else if (!client->super && (h[i]->owner != client) &&
+ !h[i]->global) {
+ ret = -EPERM;
+ } else {
+ nvmap_warn(client, "%s pinning unreferenced handle "
+ "%p\n", current->group_leader->comm, h[i]);
+ }
+ }
+ nvmap_ref_unlock(client);
+
+ nr = i;
+
+ if (ret)
+ goto out;
+
+ ret = mutex_lock_interruptible(&client->share->pin_lock);
+ if (WARN_ON(ret))
+ goto out;
+
+ for (cnt = 0; cnt < nr && !ret; cnt++) {
+ ret = wait_pin_locked(client, h[cnt]);
+ }
+ mutex_unlock(&client->share->pin_lock);
+
+ if (ret) {
+ int do_wake = 0;
+
+ for (i = 0; i < cnt; i++)
+ do_wake |= handle_unpin(client, h[i]);
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+
+ ret = -EINTR;
+ } else {
+ for (i = 0; i < nr; i++) {
+ if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
+ map_iovmm_area(h[i]);
+ }
+ }
+
+out:
+ if (ret) {
+ nvmap_ref_lock(client);
+ for (i = 0; i < nr; i++) {
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (!ref) {
+ nvmap_warn(client, "%s freed handle %p "
+ "during pinning\n",
+ current->group_leader->comm,
+ (void *)ids[i]);
+ continue;
+ }
+ atomic_dec(&ref->pin);
+ }
+ nvmap_ref_unlock(client);
+
+ for (i = cnt; i < nr; i++)
+ nvmap_handle_put(h[i]);
+ }
+
+ return ret;
+}
+
+static unsigned long handle_phys(struct nvmap_handle *h)
+{
+ u32 addr;
+
+ if (h->heap_pgalloc && h->pgalloc.contig) {
+ addr = page_to_phys(h->pgalloc.pages[0]);
+ } else if (h->heap_pgalloc) {
+ BUG_ON(!h->pgalloc.area);
+ addr = h->pgalloc.area->iovm_start;
+ } else {
+ addr = h->carveout->base;
+ }
+
+ return addr;
+}
+
+/* stores the physical address (+offset) of each handle relocation entry
+ * into its output location. see nvmap_pin_array for more details.
+ *
+ * each entry in arr (i.e., each relocation request) specifies two handles:
+ * the handle to pin (pin), and the handle where the address of pin should be
+ * written (patch). in pseudocode, this loop basically looks like:
+ *
+ * for (i = 0; i < nr; i++) {
+ * (pin, pin_offset, patch, patch_offset) = arr[i];
+ * patch[patch_offset] = address_of(pin) + pin_offset;
+ * }
+ */
+static int nvmap_reloc_pin_array(struct nvmap_client *client,
+ const struct nvmap_pinarray_elem *arr,
+ int nr, struct nvmap_handle *gather)
+{
+ struct nvmap_handle *last_patch = NULL;
+ unsigned int last_pfn = 0;
+ pte_t **pte;
+ void *addr;
+ int i;
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle *patch;
+ struct nvmap_handle *pin;
+ unsigned long reloc_addr;
+ unsigned long phys;
+ unsigned int pfn;
+
+ /* all of the handles are validated and get'ted prior to
+ * calling this function, so casting is safe here */
+ pin = (struct nvmap_handle *)arr[i].pin_mem;
+
+ if (arr[i].patch_mem == (unsigned long)last_patch) {
+ patch = last_patch;
+ } else if (arr[i].patch_mem == (unsigned long)gather) {
+ patch = gather;
+ } else {
+ if (last_patch)
+ nvmap_handle_put(last_patch);
+
+ patch = nvmap_get_handle_id(client, arr[i].patch_mem);
+ if (!patch) {
+ nvmap_free_pte(client->dev, pte);
+ return -EPERM;
+ }
+ last_patch = patch;
+ }
+
+ if (patch->heap_pgalloc) {
+ unsigned int page = arr[i].patch_offset >> PAGE_SHIFT;
+ phys = page_to_phys(patch->pgalloc.pages[page]);
+ phys += (arr[i].patch_offset & ~PAGE_MASK);
+ } else {
+ phys = patch->carveout->base + arr[i].patch_offset;
+ }
+
+ pfn = __phys_to_pfn(phys);
+ if (pfn != last_pfn) {
+ pgprot_t prot = nvmap_pgprot(patch, pgprot_kernel);
+ unsigned long kaddr = (unsigned long)addr;
+ set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
+ flush_tlb_kernel_page(kaddr);
+ }
+
+ reloc_addr = handle_phys(pin) + arr[i].pin_offset;
+ __raw_writel(reloc_addr, addr + (phys & ~PAGE_MASK));
+ }
+
+ nvmap_free_pte(client->dev, pte);
+
+ if (last_patch)
+ nvmap_handle_put(last_patch);
+
+ wmb();
+
+ return 0;
+}
+
+static int nvmap_validate_get_pin_array(struct nvmap_client *client,
+ const struct nvmap_pinarray_elem *arr,
+ int nr, struct nvmap_handle **h)
+{
+ int i;
+ int ret = 0;
+ int count = 0;
+
+ nvmap_ref_lock(client);
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle_ref *ref;
+
+ if (need_resched()) {
+ nvmap_ref_unlock(client);
+ schedule();
+ nvmap_ref_lock(client);
+ }
+
+ ref = _nvmap_validate_id_locked(client, arr[i].pin_mem);
+
+ if (!ref || !ref->handle || !ref->handle->alloc) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* a handle may be referenced multiple times in arr, but
+ * it will only be pinned once; this ensures that the
+ * minimum number of sync-queue slots in the host driver
+ * are dedicated to storing unpin lists, which allows
+ * for greater parallelism between the CPU and graphics
+ * processor */
+ if (ref->handle->flags & NVMAP_HANDLE_VISITED)
+ continue;
+
+ ref->handle->flags |= NVMAP_HANDLE_VISITED;
+
+ h[count] = nvmap_handle_get(ref->handle);
+ BUG_ON(!h[count]);
+ count++;
+ }
+
+ nvmap_ref_unlock(client);
+
+ if (ret) {
+ for (i = 0; i < count; i++) {
+ h[i]->flags &= ~NVMAP_HANDLE_VISITED;
+ nvmap_handle_put(h[i]);
+ }
+ }
+
+ return ret ?: count;
+}
+
+/* a typical mechanism host1x clients use for using the Tegra graphics
+ * processor is to build a command buffer which contains relocatable
+ * memory handle commands, and rely on the kernel to convert these in-place
+ * to addresses which are understood by the GPU hardware.
+ *
+ * this is implemented by having clients provide a sideband array
+ * of relocatable handles (+ offsets) and the location in the command
+ * buffer handle to patch with the GPU address when the client submits
+ * its command buffer to the host1x driver.
+ *
+ * the host driver also uses this relocation mechanism internally to
+ * relocate the client's (unpinned) command buffers into host-addressable
+ * memory.
+ *
+ * @client: nvmap_client which should be used for validation; should be
+ * owned by the process which is submitting command buffers
+ * @gather: special handle for relocated command buffer outputs used
+ * internally by the host driver. if this handle is encountered
+ * as an output handle in the relocation array, it is assumed
+ * to be a known-good output and is not validated.
+ * @arr: array of ((relocatable handle, offset), (output handle, offset))
+ * tuples.
+ * @nr: number of entries in arr
+ * @unique_arr: list of nvmap_handle objects which were pinned by
+ * nvmap_pin_array. must be unpinned by the caller after the
+ * command buffers referenced in gather have completed.
+ */
+int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
+ const struct nvmap_pinarray_elem *arr, int nr,
+ struct nvmap_handle **unique_arr)
+{
+ int count = 0;
+ int pinned = 0;
+ int ret = 0;
+ int i;
+
+ if (mutex_lock_interruptible(&client->share->pin_lock)) {
+ nvmap_warn(client, "%s interrupted when acquiring pin lock\n",
+ current->group_leader->comm);
+ return -EINTR;
+ }
+
+ count = nvmap_validate_get_pin_array(client, arr, nr, unique_arr);
+ if (count < 0) {
+ mutex_unlock(&client->share->pin_lock);
+ return count;
+ }
+
+ for (i = 0; i < count; i++)
+ unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;
+
+ for (pinned = 0; pinned < count && !ret; pinned++)
+ ret = wait_pin_locked(client, unique_arr[pinned]);
+
+ mutex_unlock(&client->share->pin_lock);
+
+ if (!ret)
+ ret = nvmap_reloc_pin_array(client, arr, nr, gather);
+
+ if (WARN_ON(ret)) {
+ int do_wake = 0;
+
+ for (i = pinned; i < count; i++)
+ nvmap_handle_put(unique_arr[i]);
+
+ for (i = 0; i < pinned; i++)
+ do_wake |= handle_unpin(client, unique_arr[i]);
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+
+ return ret;
+ } else {
+ for (i = 0; i < count; i++) {
+ if (unique_arr[i]->heap_pgalloc &&
+ unique_arr[i]->pgalloc.dirty)
+ map_iovmm_area(unique_arr[i]);
+ }
+ }
+
+ return count;
+}
+
+unsigned long nvmap_pin(struct nvmap_client *client,
+ struct nvmap_handle_ref *ref)
+{
+ struct nvmap_handle *h;
+ unsigned long phys;
+ int ret = 0;
+
+ h = nvmap_handle_get(ref->handle);
+ if (WARN_ON(!h))
+ return -EINVAL;
+
+ atomic_inc(&ref->pin);
+
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+ /* if IOVMM reclaiming is enabled, IOVMM-backed allocations should
+ * only be pinned through the nvmap_pin_array mechanism, since that
+ * interface guarantees that handles are unpinned when the pinning
+ * command buffers have completed. */
+ WARN_ON(h->heap_pgalloc && !h->pgalloc.contig);
+#endif
+
+ if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
+ ret = -EINTR;
+ } else {
+ ret = wait_pin_locked(client, h);
+ mutex_unlock(&client->share->pin_lock);
+ }
+
+ if (ret) {
+ atomic_dec(&ref->pin);
+ nvmap_handle_put(h);
+ } else {
+ if (h->heap_pgalloc && h->pgalloc.dirty)
+ map_iovmm_area(h);
+ phys = handle_phys(h);
+ }
+
+ return ret ?: phys;
+}
+
+unsigned long nvmap_handle_address(struct nvmap_client *c, unsigned long id)
+{
+ struct nvmap_handle *h;
+ unsigned long phys;
+
+ h = nvmap_get_handle_id(c, id);
+ if (!h)
+ return -EPERM;
+
+ phys = handle_phys(h);
+ nvmap_handle_put(h);
+
+ return phys;
+}
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
+{
+ atomic_dec(&ref->pin);
+ if (handle_unpin(client, ref->handle))
+ wake_up(&client->share->pin_wait);
+}
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+ struct nvmap_handle **h, int nr)
+{
+ int i;
+ int do_wake = 0;
+
+ for (i = 0; i < nr; i++) {
+ if (WARN_ON(!h[i]))
+ continue;
+ do_wake |= handle_unpin(client, h[i]);
+ }
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+}
+
+void *nvmap_mmap(struct nvmap_handle_ref *ref)
+{
+ struct nvmap_handle *h;
+ pgprot_t prot;
+ unsigned long adj_size;
+ unsigned long offs;
+ struct vm_struct *v;
+ void *p;
+
+ h = nvmap_handle_get(ref->handle);
+ if (!h)
+ return NULL;
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+
+ if (h->heap_pgalloc && h->pgalloc.contig &&
+ !PageHighMem(h->pgalloc.pages[0]))
+ return page_address(h->pgalloc.pages[0]);
+ else if (h->heap_pgalloc)
+ return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
+ -1, prot);
+
+ /* carveout - explicitly map the pfns into a vmalloc area */
+ adj_size = h->carveout->base & ~PAGE_MASK;
+ adj_size += h->size;
+ adj_size = PAGE_ALIGN(adj_size);
+
+ v = alloc_vm_area(adj_size);
+ if (!v) {
+ nvmap_handle_put(h);
+ return NULL;
+ }
+
+ p = v->addr + (h->carveout->base & ~PAGE_MASK);
+
+ for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
+ unsigned long addr = (unsigned long) v->addr + offs;
+ unsigned int pfn;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pfn = __phys_to_pfn(h->carveout->base + offs);
+ pgd = pgd_offset_k(addr);
+ pud = pud_alloc(&init_mm, pgd, addr);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd)
+ break;
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ break;
+ set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
+ flush_tlb_kernel_page(addr);
+ }
+
+ if (offs != adj_size) {
+ free_vm_area(v);
+ nvmap_handle_put(h);
+ return NULL;
+ }
+
+ /* leave the handle ref count incremented by 1, so that
+ * the handle will not be freed while the kernel mapping exists.
+ * nvmap_handle_put will be called by unmapping this address */
+ return p;
+}
+
+void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
+{
+ struct nvmap_handle *h;
+
+ if (!ref)
+ return;
+
+ h = ref->handle;
+
+ if (h->heap_pgalloc && (!h->pgalloc.contig ||
+ PageHighMem(h->pgalloc.pages[0]))) {
+ vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
+ } else if (!h->heap_pgalloc) {
+ struct vm_struct *vm;
+ addr -= (h->carveout->base & ~PAGE_MASK);
+ vm = remove_vm_area(addr);
+ BUG_ON(!vm);
+ }
+
+ nvmap_handle_put(h);
+}
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+ size_t align, unsigned int flags)
+{
+ const unsigned int default_heap = (NVMAP_HEAP_SYSMEM |
+ NVMAP_HEAP_CARVEOUT_GENERIC);
+ struct nvmap_handle_ref *r = NULL;
+ int err;
+
+ r = nvmap_create_handle(client, size);
+ if (IS_ERR(r))
+ return r;
+
+ err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
+ default_heap, align, flags);
+
+ if (err) {
+ nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+ return ERR_PTR(err);
+ }
+
+ return r;
+}
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+}
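To make the relocation and pinning flow documented in nvmap_pin_array above concrete, here is a minimal sketch of a host-driver-style caller; the handle ids, the patched word offset and the single-entry arrays are placeholders, not taken from the real host1x code.

/* sketch: pin one surface referenced by a command buffer, patching word 8
 * of the command buffer with the surface's device-visible address */
#include <linux/types.h>
#include <mach/nvmap.h>

static int submit_sketch(struct nvmap_client *client,
                         struct nvmap_handle *gather,
                         unsigned long cmdbuf_id, unsigned long surface_id)
{
        struct nvmap_pinarray_elem reloc[1];
        struct nvmap_handle *unique[1];
        int nr;

        reloc[0].patch_mem    = cmdbuf_id;       /* handle to patch */
        reloc[0].patch_offset = 8 * sizeof(u32); /* word 8 of the cmdbuf */
        reloc[0].pin_mem      = surface_id;      /* handle whose address is written */
        reloc[0].pin_offset   = 0;

        nr = nvmap_pin_array(client, gather, reloc, 1, unique);
        if (nr < 0)
                return nr;

        /* ... submit the command buffer to the hardware ... */

        /* once the hardware has consumed the buffer: */
        nvmap_unpin_handles(client, unique, nr);
        return 0;
}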
diff --git a/drivers/video/tegra/nvmap/nvmap.h b/drivers/video/tegra/nvmap/nvmap.h
new file mode 100644
index 000000000000..5ba5793d3257
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap.h
@@ -0,0 +1,218 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap.h
+ *
+ * GPU memory management driver for Tegra
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
+#define __VIDEO_TEGRA_NVMAP_NVMAP_H
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <asm/atomic.h>
+
+#include <mach/nvmap.h>
+
+#include "nvmap_heap.h"
+
+#define nvmap_err(_client, _fmt, ...) \
+ dev_err(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_warn(_client, _fmt, ...) \
+ dev_warn(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_debug(_client, _fmt, ...) \
+ dev_dbg(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_ref_to_id(_ref) ((unsigned long)(_ref)->handle)
+
+struct nvmap_device;
+struct page;
+struct tegra_iovmm_area;
+
+/* handles allocated using shared system memory (either IOVMM- or high-order
+ * page allocations) */
+struct nvmap_pgalloc {
+ struct page **pages;
+ struct tegra_iovmm_area *area;
+ struct list_head mru_list; /* MRU entry for IOVMM reclamation */
+ bool contig; /* contiguous system memory */
+ bool dirty; /* area is invalid and needs mapping */
+};
+
+struct nvmap_handle {
+ struct rb_node node; /* entry on global handle tree */
+ atomic_t ref; /* reference count (i.e., # of duplications) */
+ atomic_t pin; /* pin count */
+ unsigned long flags;
+ size_t size; /* padded (as-allocated) size */
+ size_t orig_size; /* original (as-requested) size */
+ struct nvmap_client *owner;
+ union {
+ struct nvmap_pgalloc pgalloc;
+ struct nvmap_heap_block *carveout;
+ };
+ bool global; /* handle may be duplicated by other clients */
+ bool secure; /* zap IOVMM area on unpin */
+ bool heap_pgalloc; /* handle is page allocated (sysmem / iovmm) */
+ bool alloc; /* handle has memory allocated */
+};
+
+struct nvmap_share {
+ struct tegra_iovmm_client *iovmm;
+ wait_queue_head_t pin_wait;
+ struct mutex pin_lock;
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+ spinlock_t mru_lock;
+ struct list_head *mru_lists;
+ int nr_mru;
+#endif
+};
+
+struct nvmap_client {
+ struct nvmap_device *dev;
+ struct nvmap_share *share;
+ struct rb_root handle_refs;
+ atomic_t iovm_commit;
+ size_t iovm_limit;
+ spinlock_t ref_lock;
+ bool super;
+ atomic_t count;
+};
+
+/* handle_ref objects are client-local references to an nvmap_handle;
+ * they are distinct objects so that handles can be unpinned and
+ * unreferenced the correct number of times when a client abnormally
+ * terminates */
+struct nvmap_handle_ref {
+ struct nvmap_handle *handle;
+ struct rb_node node;
+ atomic_t dupes; /* number of times to free on file close */
+ atomic_t pin; /* number of times to unpin on free */
+};
+
+struct nvmap_vma_priv {
+ struct nvmap_handle *handle;
+ size_t offs;
+ atomic_t count; /* number of processes cloning the VMA */
+};
+
+static inline void nvmap_ref_lock(struct nvmap_client *priv)
+{
+ spin_lock(&priv->ref_lock);
+}
+
+static inline void nvmap_ref_unlock(struct nvmap_client *priv)
+{
+ spin_unlock(&priv->ref_lock);
+}
+
+struct device *nvmap_client_to_device(struct nvmap_client *client);
+
+pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);
+
+pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);
+
+void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);
+
+struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
+ size_t len, size_t align,
+ unsigned long usage,
+ unsigned int prot);
+
+unsigned long nvmap_carveout_usage(struct nvmap_client *c,
+ struct nvmap_heap_block *b);
+
+struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
+ unsigned long handle);
+
+struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv,
+ unsigned long id);
+
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+ unsigned long id);
+
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+ size_t size);
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+ unsigned long id);
+
+
+int nvmap_alloc_handle_id(struct nvmap_client *client,
+ unsigned long id, unsigned int heap_mask,
+ size_t align, unsigned int flags);
+
+void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id);
+
+int nvmap_pin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids);
+
+void nvmap_unpin_ids(struct nvmap_client *priv,
+ unsigned int nr, const unsigned long *ids);
+
+void _nvmap_handle_free(struct nvmap_handle *h);
+
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);
+
+void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);
+
+static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
+{
+ if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
+ pr_err("%s: %s getting a freed handle\n",
+ __func__, current->group_leader->comm);
+ if (atomic_read(&h->ref) <= 0)
+ return NULL;
+ }
+ return h;
+}
+
+static inline void nvmap_handle_put(struct nvmap_handle *h)
+{
+ int cnt = atomic_dec_return(&h->ref);
+
+ if (WARN_ON(cnt < 0)) {
+ pr_err("%s: %s put to negative references\n",
+ __func__, current->comm);
+ } else if (cnt == 0)
+ _nvmap_handle_free(h);
+}
+
+static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
+{
+ if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
+ return pgprot_dmacoherent(prot);
+ else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
+ return pgprot_writecombine(prot);
+ else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
+ return pgprot_inner_writeback(prot);
+ return prot;
+}
+
+int is_nvmap_vma(struct vm_area_struct *vma);
+
+#endif
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
new file mode 100644
index 000000000000..b84a788dd33b
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -0,0 +1,917 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_dev.c
+ *
+ * User-space interface to nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/backing-dev.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_ioctl.h"
+#include "nvmap_mru.h"
+
+#define NVMAP_NUM_PTES 64
+
+struct nvmap_carveout_node {
+ struct list_head heap_list;
+ unsigned int heap_bit;
+ struct nvmap_heap *carveout;
+};
+
+struct nvmap_device {
+ struct vm_struct *vm_rgn;
+ pte_t *ptes[NVMAP_NUM_PTES];
+ unsigned long ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
+ unsigned int lastpte;
+ spinlock_t ptelock;
+
+ struct rb_root handles;
+ spinlock_t handle_lock;
+ wait_queue_head_t pte_wait;
+ struct miscdevice dev_super;
+ struct miscdevice dev_user;
+ struct list_head heaps;
+ struct nvmap_share iovmm_master;
+};
+
+struct nvmap_device *nvmap_dev;
+
+static struct backing_dev_info nvmap_bdi = {
+ .ra_pages = 0,
+ .capabilities = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
+ BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
+};
+
+static int nvmap_open(struct inode *inode, struct file *filp);
+static int nvmap_release(struct inode *inode, struct file *filp);
+static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
+static void nvmap_vma_open(struct vm_area_struct *vma);
+static void nvmap_vma_close(struct vm_area_struct *vma);
+static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+static const struct file_operations nvmap_user_fops = {
+ .owner = THIS_MODULE,
+ .open = nvmap_open,
+ .release = nvmap_release,
+ .unlocked_ioctl = nvmap_ioctl,
+ .mmap = nvmap_map,
+};
+
+static const struct file_operations nvmap_super_fops = {
+ .owner = THIS_MODULE,
+ .open = nvmap_open,
+ .release = nvmap_release,
+ .unlocked_ioctl = nvmap_ioctl,
+ .mmap = nvmap_map,
+};
+
+static struct vm_operations_struct nvmap_vma_ops = {
+ .open = nvmap_vma_open,
+ .close = nvmap_vma_close,
+ .fault = nvmap_vma_fault,
+};
+
+int is_nvmap_vma(struct vm_area_struct *vma)
+{
+ return vma->vm_ops == &nvmap_vma_ops;
+}
+
+struct device *nvmap_client_to_device(struct nvmap_client *client)
+{
+ if (client->super)
+ return client->dev->dev_super.this_device;
+ else
+ return client->dev->dev_user.this_device;
+}
+
+/* allocates a PTE for the caller's use; returns the PTE pointer or
+ * a negative errno. may be called from IRQs */
+pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
+{
+ unsigned long flags;
+ unsigned long bit;
+
+ spin_lock_irqsave(&dev->ptelock, flags);
+ bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
+ if (bit == NVMAP_NUM_PTES) {
+ bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
+ if (bit == dev->lastpte)
+ bit = NVMAP_NUM_PTES;
+ }
+
+ if (bit == NVMAP_NUM_PTES) {
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev->lastpte = bit;
+ set_bit(bit, dev->ptebits);
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+
+ *vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
+ return &(dev->ptes[bit]);
+}
+
+/* allocates a PTE for the caller's use; returns the PTE pointer or
+ * a negative errno. must be called from sleepable contexts */
+pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
+{
+ int ret;
+ pte_t **pte;
+ ret = wait_event_interruptible(dev->pte_wait,
+ !IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));
+
+ if (ret == -ERESTARTSYS)
+ return ERR_PTR(-EINTR);
+
+ return pte;
+}
+
+/* frees a PTE */
+void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
+{
+ unsigned long addr;
+ unsigned int bit = pte - dev->ptes;
+ unsigned long flags;
+
+ if (WARN_ON(bit >= NVMAP_NUM_PTES))
+ return;
+
+ addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
+ set_pte_at(&init_mm, addr, *pte, 0);
+
+ spin_lock_irqsave(&dev->ptelock, flags);
+ clear_bit(bit, dev->ptebits);
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+ wake_up(&dev->pte_wait);
+}
+
+/* verifies that the handle id "id" refers to a valid handle_ref owned by the
+ * client. caller must hold the client's ref_lock prior to calling this function */
+struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c,
+ unsigned long id)
+{
+ struct rb_node *n = c->handle_refs.rb_node;
+
+ while (n) {
+ struct nvmap_handle_ref *ref;
+ ref = rb_entry(n, struct nvmap_handle_ref, node);
+ if ((unsigned long)ref->handle == id)
+ return ref;
+ else if (id > (unsigned long)ref->handle)
+ n = n->rb_right;
+ else
+ n = n->rb_left;
+ }
+
+ return NULL;
+}
+
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle_ref *ref;
+ struct nvmap_handle *h = NULL;
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, id);
+ if (ref)
+ h = ref->handle;
+ if (h)
+ h = nvmap_handle_get(h);
+ nvmap_ref_unlock(client);
+ return h;
+}
+
+unsigned long nvmap_carveout_usage(struct nvmap_client *c,
+ struct nvmap_heap_block *b)
+{
+ struct nvmap_heap *h = nvmap_block_to_heap(b);
+ struct nvmap_carveout_node *n;
+
+ list_for_each_entry(n, &c->dev->heaps, heap_list) {
+ if (n->carveout == h)
+ return n->heap_bit;
+ }
+ return 0;
+}
+
+static int nvmap_flush_heap_block(struct nvmap_client *client,
+ struct nvmap_heap_block *block, size_t len)
+{
+ pte_t **pte;
+ void *addr;
+ unsigned long kaddr;
+ unsigned long phys = block->base;
+ unsigned long end = block->base + len;
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ kaddr = (unsigned long)addr;
+
+ while (phys < end) {
+ unsigned long next = (phys + PAGE_SIZE) & PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(phys);
+ void *base = (void *)kaddr + (phys & ~PAGE_MASK);
+
+ next = min(next, end);
+ set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, pgprot_kernel));
+ flush_tlb_kernel_page(kaddr);
+ __cpuc_flush_dcache_area(base, next - phys);
+ phys = next;
+ }
+
+ outer_flush_range(block->base, block->base + len);
+
+ nvmap_free_pte(client->dev, pte);
+ return 0;
+}
+
+struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
+ size_t len, size_t align,
+ unsigned long usage,
+ unsigned int prot)
+{
+ struct nvmap_carveout_node *co_heap;
+ struct nvmap_device *dev = client->dev;
+
+ list_for_each_entry(co_heap, &dev->heaps, heap_list) {
+ struct nvmap_heap_block *block;
+
+ if (!(co_heap->heap_bit & usage))
+ continue;
+
+ block = nvmap_heap_alloc(co_heap->carveout, len, align, prot);
+ if (block) {
+ /* flush any stale data that may be left in the
+ * cache at the block's address, since the new
+ * block may be mapped uncached */
+ if (nvmap_flush_heap_block(client, block, len)) {
+ nvmap_heap_free(block);
+ return NULL;
+ } else
+ return block;
+ }
+ }
+
+ return NULL;
+}
+
+/* remove a handle from the device's tree of all handles; called
+ * when freeing handles. */
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
+{
+ spin_lock(&dev->handle_lock);
+
+ /* re-test inside the spinlock if the handle really has no clients;
+ * only remove the handle if it is unreferenced */
+ if (atomic_add_return(0, &h->ref) > 0) {
+ spin_unlock(&dev->handle_lock);
+ return -EBUSY;
+ }
+ smp_rmb();
+ BUG_ON(atomic_read(&h->ref) < 0);
+ BUG_ON(atomic_read(&h->pin) != 0);
+
+ rb_erase(&h->node, &dev->handles);
+
+ spin_unlock(&dev->handle_lock);
+ return 0;
+}
+
+/* adds a newly-created handle to the device master tree */
+void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+
+ spin_lock(&dev->handle_lock);
+ p = &dev->handles.rb_node;
+ while (*p) {
+ struct nvmap_handle *b;
+
+ parent = *p;
+ b = rb_entry(parent, struct nvmap_handle, node);
+ if (h > b)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&h->node, parent, p);
+ rb_insert_color(&h->node, &dev->handles);
+ spin_unlock(&dev->handle_lock);
+}
+
+/* validates that a handle is in the device master tree, and that the
+ * client has permission to access it */
+struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle *h = NULL;
+ struct rb_node *n;
+
+ spin_lock(&client->dev->handle_lock);
+
+ n = client->dev->handles.rb_node;
+
+ while (n) {
+ h = rb_entry(n, struct nvmap_handle, node);
+ if ((unsigned long)h == id) {
+ if (client->super || h->global || (h->owner == client))
+ h = nvmap_handle_get(h);
+ spin_unlock(&client->dev->handle_lock);
+ return h;
+ }
+ if (id > (unsigned long)h)
+ n = n->rb_right;
+ else
+ n = n->rb_left;
+ }
+ spin_unlock(&client->dev->handle_lock);
+ return NULL;
+}
+
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev)
+{
+ struct nvmap_client *client;
+
+ if (WARN_ON(!dev))
+ return NULL;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return NULL;
+
+ client->super = true;
+ client->dev = dev;
+ /* TODO: allocate unique IOVMM client for each nvmap client */
+ client->share = &dev->iovmm_master;
+ client->handle_refs = RB_ROOT;
+
+ atomic_set(&client->iovm_commit, 0);
+
+ client->iovm_limit = nvmap_mru_vm_size(client->share->iovmm);
+
+ spin_lock_init(&client->ref_lock);
+ atomic_set(&client->count, 1);
+
+ return client;
+}
+
+static void destroy_client(struct nvmap_client *client)
+{
+ struct rb_node *n;
+
+ if (!client)
+ return;
+
+ while ((n = rb_first(&client->handle_refs))) {
+ struct nvmap_handle_ref *ref;
+ int pins, dupes;
+
+ ref = rb_entry(n, struct nvmap_handle_ref, node);
+ rb_erase(&ref->node, &client->handle_refs);
+
+ smp_rmb();
+ pins = atomic_read(&ref->pin);
+
+ while (pins--)
+ nvmap_unpin_handles(client, &ref->handle, 1);
+
+ dupes = atomic_read(&ref->dupes);
+ while (dupes--)
+ nvmap_handle_put(ref->handle);
+
+ kfree(ref);
+ }
+
+ kfree(client);
+}
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
+{
+ if (WARN_ON(!client))
+ return NULL;
+
+ if (WARN_ON(!atomic_add_unless(&client->count, 1, 0)))
+ return NULL;
+
+ return client;
+}
+
+struct nvmap_client *nvmap_client_get_file(int fd)
+{
+ struct nvmap_client *client = ERR_PTR(-EFAULT);
+ struct file *f = fget(fd);
+ if (!f)
+ return ERR_PTR(-EINVAL);
+
+ if ((f->f_op == &nvmap_user_fops) || (f->f_op == &nvmap_super_fops)) {
+ client = f->private_data;
+ atomic_inc(&client->count);
+ }
+
+ fput(f);
+ return client;
+}
+
+void nvmap_client_put(struct nvmap_client *client)
+{
+ if (!client)
+ return;
+
+ if (!atomic_dec_return(&client->count))
+ destroy_client(client);
+}
+
+static int nvmap_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *miscdev = filp->private_data;
+ struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
+ struct nvmap_client *priv;
+ int ret;
+
+ ret = nonseekable_open(inode, filp);
+ if (unlikely(ret))
+ return ret;
+
+ BUG_ON(dev != nvmap_dev);
+ priv = nvmap_create_client(dev);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->super = (filp->f_op == &nvmap_super_fops);
+
+ filp->f_mapping->backing_dev_info = &nvmap_bdi;
+
+ filp->private_data = priv;
+ return 0;
+}
+
+static int nvmap_release(struct inode *inode, struct file *filp)
+{
+ nvmap_client_put(filp->private_data);
+ return 0;
+}
+
+static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
+{
+ struct nvmap_vma_priv *priv;
+
+ /* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
+ * will be stored in vm_private_data and faulted in. until the
+ * ioctl is made, the VMA is mapped no-access */
+ vma->vm_private_data = NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->offs = 0;
+ priv->handle = NULL;
+ atomic_set(&priv->count, 1);
+
+ vma->vm_flags |= VM_SHARED;
+ vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
+ vma->vm_ops = &nvmap_vma_ops;
+ vma->vm_private_data = priv;
+
+ return 0;
+}
+
+static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ void __user *uarg = (void __user *)arg;
+
+ if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
+ return -ENOTTY;
+
+ if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
+ return -ENOTTY;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (err)
+ return -EFAULT;
+
+ switch (cmd) {
+ case NVMAP_IOC_CLAIM:
+ nvmap_warn(filp->private_data, "preserved handles not "
+ "supported\n");
+ err = -ENODEV;
+ break;
+ case NVMAP_IOC_CREATE:
+ case NVMAP_IOC_FROM_ID:
+ err = nvmap_ioctl_create(filp, cmd, uarg);
+ break;
+
+ case NVMAP_IOC_GET_ID:
+ err = nvmap_ioctl_getid(filp, uarg);
+ break;
+
+ case NVMAP_IOC_PARAM:
+ err = nvmap_ioctl_get_param(filp, uarg);
+ break;
+
+ case NVMAP_IOC_UNPIN_MULT:
+ case NVMAP_IOC_PIN_MULT:
+ err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT, uarg);
+ break;
+
+ case NVMAP_IOC_ALLOC:
+ err = nvmap_ioctl_alloc(filp, uarg);
+ break;
+
+ case NVMAP_IOC_FREE:
+ err = nvmap_ioctl_free(filp, arg);
+ break;
+
+ case NVMAP_IOC_MMAP:
+ err = nvmap_map_into_caller_ptr(filp, uarg);
+ break;
+
+ case NVMAP_IOC_WRITE:
+ case NVMAP_IOC_READ:
+ err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg);
+ break;
+
+ case NVMAP_IOC_CACHE:
+ err = nvmap_ioctl_cache_maint(filp, uarg);
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+ return err;
+}
+
+/* to ensure that the backing store for the VMA isn't freed while a fork'd
+ * reference still exists, nvmap_vma_open increments the reference count on
+ * the handle, and nvmap_vma_close decrements it. alternatively, we could
+ * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
+*/
+static void nvmap_vma_open(struct vm_area_struct *vma)
+{
+ struct nvmap_vma_priv *priv;
+
+ priv = vma->vm_private_data;
+
+ BUG_ON(!priv);
+
+ atomic_inc(&priv->count);
+}
+
+static void nvmap_vma_close(struct vm_area_struct *vma)
+{
+ struct nvmap_vma_priv *priv = vma->vm_private_data;
+
+ if (priv && !atomic_dec_return(&priv->count)) {
+ if (priv->handle)
+ nvmap_handle_put(priv->handle);
+ kfree(priv);
+ }
+
+ vma->vm_private_data = NULL;
+}
+
+static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct nvmap_vma_priv *priv;
+ unsigned long offs;
+
+ offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
+ priv = vma->vm_private_data;
+ if (!priv || !priv->handle || !priv->handle->alloc)
+ return VM_FAULT_SIGBUS;
+
+ offs += priv->offs;
+ /* if the VMA was split for some reason, vm_pgoff will be the VMA's
+ * offset from the original VMA */
+ offs += (vma->vm_pgoff << PAGE_SHIFT);
+
+ if (offs >= priv->handle->size)
+ return VM_FAULT_SIGBUS;
+
+ if (!priv->handle->heap_pgalloc) {
+ unsigned long pfn;
+ BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
+ pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
+ vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ return VM_FAULT_NOPAGE;
+ } else {
+ struct page *page;
+ offs >>= PAGE_SHIFT;
+ page = priv->handle->pgalloc.pages[offs];
+ if (page)
+ get_page(page);
+ vmf->page = page;
+ return (page) ? 0 : VM_FAULT_SIGBUS;
+ }
+}
+
+static ssize_t attr_show_usage(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev);
+
+ return sprintf(buf, "%08x\n", node->heap_bit);
+}
+
+static struct device_attribute heap_attr_show_usage =
+ __ATTR(usage, S_IRUGO, attr_show_usage, NULL);
+
+static struct attribute *heap_extra_attrs[] = {
+ &heap_attr_show_usage.attr,
+ NULL,
+};
+
+static struct attribute_group heap_extra_attr_group = {
+ .attrs = heap_extra_attrs,
+};
+
+static int nvmap_probe(struct platform_device *pdev)
+{
+ struct nvmap_platform_data *plat = pdev->dev.platform_data;
+ struct nvmap_device *dev;
+ unsigned int i;
+ int e;
+
+ if (!plat) {
+ dev_err(&pdev->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(nvmap_dev != NULL)) {
+ dev_err(&pdev->dev, "only one nvmap device may be present\n");
+ return -ENODEV;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&pdev->dev, "out of memory for device\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_user.minor = MISC_DYNAMIC_MINOR;
+ dev->dev_user.name = "nvmap";
+ dev->dev_user.fops = &nvmap_user_fops;
+ dev->dev_user.parent = &pdev->dev;
+
+ dev->dev_super.minor = MISC_DYNAMIC_MINOR;
+ dev->dev_super.name = "kvmap";
+ dev->dev_super.fops = &nvmap_super_fops;
+ dev->dev_super.parent = &pdev->dev;
+
+ dev->handles = RB_ROOT;
+
+ init_waitqueue_head(&dev->pte_wait);
+
+ init_waitqueue_head(&dev->iovmm_master.pin_wait);
+ mutex_init(&dev->iovmm_master.pin_lock);
+ dev->iovmm_master.iovmm =
+ tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL);
+ if (IS_ERR(dev->iovmm_master.iovmm)) {
+ e = PTR_ERR(dev->iovmm_master.iovmm);
+ dev_err(&pdev->dev, "couldn't create iovmm client\n");
+ goto fail;
+ }
+ dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE);
+ if (!dev->vm_rgn) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate remapping region\n");
+ goto fail;
+ }
+ e = nvmap_mru_init(&dev->iovmm_master);
+ if (e) {
+ dev_err(&pdev->dev, "couldn't initialize MRU lists\n");
+ goto fail;
+ }
+
+ spin_lock_init(&dev->ptelock);
+ spin_lock_init(&dev->handle_lock);
+ INIT_LIST_HEAD(&dev->heaps);
+
+ for (i = 0; i < NVMAP_NUM_PTES; i++) {
+ unsigned long addr;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
+ pgd = pgd_offset_k(addr);
+ pud = pud_alloc(&init_mm, pgd, addr);
+ if (!pud) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate page tables\n");
+ goto fail;
+ }
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate page tables\n");
+ goto fail;
+ }
+ dev->ptes[i] = pte_alloc_kernel(pmd, addr);
+ if (!dev->ptes[i]) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate page tables\n");
+ goto fail;
+ }
+ }
+
+ e = misc_register(&dev->dev_user);
+ if (e) {
+ dev_err(&pdev->dev, "unable to register miscdevice %s\n",
+ dev->dev_user.name);
+ goto fail;
+ }
+
+ e = misc_register(&dev->dev_super);
+ if (e) {
+ dev_err(&pdev->dev, "unable to register miscdevice %s\n",
+ dev->dev_super.name);
+ goto fail;
+ }
+
+ for (i = 0; i < plat->nr_carveouts; i++) {
+ struct nvmap_carveout_node *node;
+ const struct nvmap_platform_carveout *co = &plat->carveouts[i];
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate %s\n", co->name);
+ goto fail;
+ }
+ node->carveout = nvmap_heap_create(dev->dev_user.this_device,
+ co->name, co->base, co->size,
+ co->buddy_size, node);
+ if (!node->carveout) {
+ e = -ENOMEM;
+ kfree(node);
+ dev_err(&pdev->dev, "couldn't create %s\n", co->name);
+ goto fail;
+ }
+ node->heap_bit = co->usage_mask;
+ if (nvmap_heap_create_group(node->carveout,
+ &heap_extra_attr_group))
+ dev_warn(&pdev->dev, "couldn't add extra attributes\n");
+
+ dev_info(&pdev->dev, "created carveout %s (%uKiB)\n",
+ co->name, co->size / 1024);
+ list_add_tail(&node->heap_list, &dev->heaps);
+ }
+ /* FIXME: walk platform data and create heaps */
+
+ platform_set_drvdata(pdev, dev);
+ nvmap_dev = dev;
+ return 0;
+fail:
+ while (!list_empty(&dev->heaps)) {
+ struct nvmap_carveout_node *node;
+
+ node = list_first_entry(&dev->heaps,
+ struct nvmap_carveout_node, heap_list);
+ list_del(&node->heap_list);
+ nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
+ nvmap_heap_destroy(node->carveout);
+ kfree(node);
+ }
+ nvmap_mru_destroy(&dev->iovmm_master);
+ if (dev->dev_super.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&dev->dev_super);
+ if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&dev->dev_user);
+ if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
+ tegra_iovmm_free_client(dev->iovmm_master.iovmm);
+ if (dev->vm_rgn)
+ free_vm_area(dev->vm_rgn);
+ kfree(dev);
+ nvmap_dev = NULL;
+ return e;
+}
+
+static int nvmap_remove(struct platform_device *pdev)
+{
+ struct nvmap_device *dev = platform_get_drvdata(pdev);
+ struct rb_node *n;
+ struct nvmap_handle *h;
+
+ misc_deregister(&dev->dev_super);
+ misc_deregister(&dev->dev_user);
+
+ while ((n = rb_first(&dev->handles))) {
+ h = rb_entry(n, struct nvmap_handle, node);
+ rb_erase(&h->node, &dev->handles);
+ kfree(h);
+ }
+
+ if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
+ tegra_iovmm_free_client(dev->iovmm_master.iovmm);
+
+ nvmap_mru_destroy(&dev->iovmm_master);
+
+ while (!list_empty(&dev->heaps)) {
+ struct nvmap_carveout_node *node;
+
+ node = list_first_entry(&dev->heaps,
+ struct nvmap_carveout_node, heap_list);
+ list_del(&node->heap_list);
+ nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
+ nvmap_heap_destroy(node->carveout);
+ kfree(node);
+ }
+
+ free_vm_area(dev->vm_rgn);
+ kfree(dev);
+ nvmap_dev = NULL;
+ return 0;
+}
+
+static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int nvmap_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver nvmap_driver = {
+ .probe = nvmap_probe,
+ .remove = nvmap_remove,
+ .suspend = nvmap_suspend,
+ .resume = nvmap_resume,
+
+ .driver = {
+ .name = "tegra-nvmap",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nvmap_init_driver(void)
+{
+ int e;
+
+ nvmap_dev = NULL;
+
+ e = nvmap_heap_init();
+ if (e)
+ goto fail;
+
+ e = platform_driver_register(&nvmap_driver);
+ if (e) {
+ nvmap_heap_deinit();
+ goto fail;
+ }
+
+fail:
+ return e;
+}
+fs_initcall(nvmap_init_driver);
+
+static void __exit nvmap_exit_driver(void)
+{
+ platform_driver_unregister(&nvmap_driver);
+ nvmap_heap_deinit();
+ nvmap_dev = NULL;
+}
+module_exit(nvmap_exit_driver);
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
new file mode 100644
index 000000000000..eed3403db02d
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -0,0 +1,457 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_handle.c
+ *
+ * Handle allocation and freeing routines for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/attrib_alloc.h>
+#include <asm/pgtable.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+#define NVMAP_SECURE_HEAPS (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM)
+#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+/* handles may be arbitrarily large (16+MiB), and any handle allocated from
+ * the kernel (i.e., not a carveout handle) includes its array of pages. to
+ * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
+ * the array is allocated using vmalloc. */
+#define PAGELIST_VMALLOC_MIN (PAGE_SIZE * 2)
+
+static inline void *altalloc(size_t len)
+{
+ if (len >= PAGELIST_VMALLOC_MIN)
+ return vmalloc(len);
+ else
+ return kmalloc(len, GFP_KERNEL);
+}
+
+static inline void altfree(void *ptr, size_t len)
+{
+ if (!ptr)
+ return;
+
+ if (len >= PAGELIST_VMALLOC_MIN)
+ vfree(ptr);
+ else
+ kfree(ptr);
+}
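+
+/* illustrative sizing only (assumes 4 KiB pages and 4-byte struct page
+ * pointers): a 2 MiB handle needs a 512-entry, 2 KiB page array and stays
+ * on kmalloc, while a 16 MiB handle needs a 16 KiB array and takes the
+ * vmalloc path:
+ *
+ *	altalloc(512 * sizeof(struct page *));	-  2 KiB <  8 KiB -> kmalloc
+ *	altalloc(4096 * sizeof(struct page *));	- 16 KiB >= 8 KiB -> vmalloc
+ */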
+
+void _nvmap_handle_free(struct nvmap_handle *h)
+{
+ struct nvmap_client *client = h->owner;
+ unsigned int i, nr_page;
+
+ if (nvmap_handle_remove(client->dev, h) != 0)
+ return;
+
+ if (!h->alloc)
+ goto out;
+
+ if (!h->heap_pgalloc) {
+ nvmap_heap_free(h->carveout);
+ goto out;
+ }
+
+ nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);
+
+ BUG_ON(h->size & ~PAGE_MASK);
+ BUG_ON(!h->pgalloc.pages);
+
+ nvmap_mru_remove(client->share, h);
+ if (h->pgalloc.area)
+ tegra_iovmm_free_vm(h->pgalloc.area);
+
+ for (i = 0; i < nr_page; i++)
+ arm_attrib_free_page(h->pgalloc.pages[i]);
+
+ altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));
+
+out:
+ kfree(h);
+ nvmap_client_put(client);
+}
+
+static int handle_page_alloc(struct nvmap_client *client,
+ struct nvmap_handle *h, bool contiguous)
+{
+ size_t size = PAGE_ALIGN(h->size);
+ unsigned int nr_page = size >> PAGE_SHIFT;
+ pgprot_t prot;
+ unsigned int i = 0;
+ struct page **pages;
+
+ pages = altalloc(nr_page * sizeof(*pages));
+ if (!pages)
+ return -ENOMEM;
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+
+ if (nr_page == 1)
+ contiguous = true;
+
+ h->pgalloc.area = NULL;
+	if (contiguous) {
+		struct page *page;
+		page = arm_attrib_alloc_pages_exact(GFP_NVMAP, size, prot);
+		if (!page)
+			goto fail;
+
+		for (i = 0; i < nr_page; i++)
+			pages[i] = nth_page(page, i);
+
+ } else {
+ for (i = 0; i < nr_page; i++) {
+ pages[i] = arm_attrib_alloc_page(GFP_NVMAP, prot);
+ if (!pages[i])
+ goto fail;
+ }
+
+#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+ h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
+ NULL, size, prot);
+ if (!h->pgalloc.area)
+ goto fail;
+
+ h->pgalloc.dirty = true;
+#endif
+ }
+
+ h->size = size;
+ h->pgalloc.pages = pages;
+ h->pgalloc.contig = contiguous;
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ return 0;
+
+fail:
+ while (i--)
+ arm_attrib_free_page(pages[i]);
+ altfree(pages, nr_page * sizeof(*pages));
+ return -ENOMEM;
+}
+
+static void alloc_handle(struct nvmap_client *client, size_t align,
+ struct nvmap_handle *h, unsigned int type)
+{
+ BUG_ON(type & (type - 1));
+
+ if (type & NVMAP_HEAP_CARVEOUT_MASK) {
+ struct nvmap_heap_block *b;
+ b = nvmap_carveout_alloc(client, h->size, align,
+ type, h->flags);
+ if (b) {
+ h->carveout = b;
+ h->heap_pgalloc = false;
+ h->alloc = true;
+ }
+ } else if (type & NVMAP_HEAP_IOVMM) {
+ size_t reserved = PAGE_ALIGN(h->size);
+ int commit;
+ int ret;
+
+ BUG_ON(align > PAGE_SIZE);
+
+ /* increment the committed IOVM space prior to allocation
+ * to avoid race conditions with other threads simultaneously
+ * allocating. */
+ commit = atomic_add_return(reserved, &client->iovm_commit);
+
+ if (commit < client->iovm_limit)
+ ret = handle_page_alloc(client, h, false);
+ else
+ ret = -ENOMEM;
+
+ if (!ret) {
+ h->heap_pgalloc = true;
+ h->alloc = true;
+ } else {
+ atomic_sub(reserved, &client->iovm_commit);
+ }
+
+ } else if (type & NVMAP_HEAP_SYSMEM) {
+
+ if (handle_page_alloc(client, h, true) == 0) {
+ BUG_ON(!h->pgalloc.contig);
+ h->heap_pgalloc = true;
+ h->alloc = true;
+ }
+ }
+}
+
+/* small allocations will try to allocate from generic OS memory before
+ * any of the limited heaps, to increase the effective memory for graphics
+ * allocations, and to reduce fragmentation of the graphics heaps with
+ * sub-page splinters */
+static const unsigned int heap_policy_small[] = {
+ NVMAP_HEAP_CARVEOUT_IRAM,
+ NVMAP_HEAP_SYSMEM,
+ NVMAP_HEAP_CARVEOUT_MASK,
+ NVMAP_HEAP_IOVMM,
+ 0,
+};
+
+static const unsigned int heap_policy_large[] = {
+ NVMAP_HEAP_CARVEOUT_IRAM,
+ NVMAP_HEAP_IOVMM,
+ NVMAP_HEAP_CARVEOUT_MASK,
+ NVMAP_HEAP_SYSMEM,
+ 0,
+};
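+
+/* worked example (illustrative only): a single-page, non-secure request with
+ * heap_mask == (NVMAP_HEAP_SYSMEM | NVMAP_HEAP_IOVMM) walks heap_policy_small
+ * and tries NVMAP_HEAP_SYSMEM before NVMAP_HEAP_IOVMM, while a multi-page
+ * request with the same mask walks heap_policy_large and tries
+ * NVMAP_HEAP_IOVMM first, keeping large buffers out of system memory whenever
+ * an IOVMM client can hold them. */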
+
+int nvmap_alloc_handle_id(struct nvmap_client *client,
+ unsigned long id, unsigned int heap_mask,
+ size_t align, unsigned int flags)
+{
+ struct nvmap_handle *h = NULL;
+ const unsigned int *alloc_policy;
+ int nr_page;
+ int err = -ENOMEM;
+
+ align = max_t(size_t, align, L1_CACHE_BYTES);
+
+ /* can't do greater than page size alignment with page alloc */
+ if (align > PAGE_SIZE)
+ heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
+
+ h = nvmap_get_handle_id(client, id);
+
+ if (!h)
+ return -EINVAL;
+
+ if (h->alloc)
+ goto out;
+
+ nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ h->secure = !!(flags & NVMAP_HANDLE_SECURE);
+ h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
+
+ /* secure allocations can only be served from secure heaps */
+ if (h->secure)
+ heap_mask &= NVMAP_SECURE_HEAPS;
+
+ if (!heap_mask) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;
+
+ while (!h->alloc && *alloc_policy) {
+ unsigned int heap_type;
+
+ heap_type = *alloc_policy++;
+ heap_type &= heap_mask;
+
+ if (!heap_type)
+ continue;
+
+ heap_mask &= ~heap_type;
+
+ while (heap_type && !h->alloc) {
+ unsigned int heap;
+
+ /* iterate possible heaps MSB-to-LSB, since higher-
+ * priority carveouts will have higher usage masks */
+ heap = 1 << __fls(heap_type);
+ alloc_handle(client, align, h, heap);
+ heap_type &= ~heap;
+ }
+ }
+
+out:
+ err = (h->alloc) ? 0 : err;
+ nvmap_handle_put(h);
+ return err;
+}
+
+void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
+{
+ struct nvmap_handle_ref *ref;
+ struct nvmap_handle *h;
+ int pins;
+
+ nvmap_ref_lock(client);
+
+ ref = _nvmap_validate_id_locked(client, id);
+ if (!ref) {
+ nvmap_ref_unlock(client);
+ return;
+ }
+
+ BUG_ON(!ref->handle);
+ h = ref->handle;
+
+ if (atomic_dec_return(&ref->dupes)) {
+ nvmap_ref_unlock(client);
+ goto out;
+ }
+
+ smp_rmb();
+ pins = atomic_read(&ref->pin);
+ rb_erase(&ref->node, &client->handle_refs);
+
+ if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
+ atomic_sub(h->size, &client->iovm_commit);
+
+ nvmap_ref_unlock(client);
+
+ if (pins)
+ nvmap_err(client, "%s freeing pinned handle %p\n",
+ current->group_leader->comm, h);
+
+ while (pins--)
+ nvmap_unpin_handles(client, &ref->handle, 1);
+
+ kfree(ref);
+
+out:
+ BUG_ON(!atomic_read(&h->ref));
+ nvmap_handle_put(h);
+}
+
+static void add_handle_ref(struct nvmap_client *client,
+ struct nvmap_handle_ref *ref)
+{
+ struct rb_node **p, *parent = NULL;
+
+ nvmap_ref_lock(client);
+ p = &client->handle_refs.rb_node;
+ while (*p) {
+ struct nvmap_handle_ref *node;
+ parent = *p;
+ node = rb_entry(parent, struct nvmap_handle_ref, node);
+ if (ref->handle > node->handle)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&ref->node, parent, p);
+ rb_insert_color(&ref->node, &client->handle_refs);
+ nvmap_ref_unlock(client);
+}
+
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+ size_t size)
+{
+ struct nvmap_handle *h;
+ struct nvmap_handle_ref *ref = NULL;
+
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref) {
+ kfree(h);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&h->ref, 1);
+ atomic_set(&h->pin, 0);
+ h->owner = nvmap_client_get(client);
+ BUG_ON(!h->owner);
+ h->size = h->orig_size = size;
+ h->flags = NVMAP_HANDLE_WRITE_COMBINE;
+
+ nvmap_handle_add(client->dev, h);
+
+ atomic_set(&ref->dupes, 1);
+ ref->handle = h;
+ atomic_set(&ref->pin, 0);
+ add_handle_ref(client, ref);
+ return ref;
+}
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle_ref *ref = NULL;
+ struct nvmap_handle *h = NULL;
+
+ BUG_ON(!client || client->dev != nvmap_dev);
+ /* on success, the reference count for the handle should be
+ * incremented, so the success paths will not call nvmap_handle_put */
+ h = nvmap_validate_get(client, id);
+
+ if (!h) {
+ nvmap_debug(client, "%s duplicate handle failed\n",
+ current->group_leader->comm);
+ return ERR_PTR(-EPERM);
+ }
+
+ if (!h->alloc) {
+ nvmap_err(client, "%s duplicating unallocated handle\n",
+ current->group_leader->comm);
+ nvmap_handle_put(h);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, (unsigned long)h);
+
+ if (ref) {
+ /* handle already duplicated in client; just increment
+ * the reference count rather than re-duplicating it */
+ atomic_inc(&ref->dupes);
+ nvmap_ref_unlock(client);
+ return ref;
+ }
+
+ nvmap_ref_unlock(client);
+
+ /* verify that adding this handle to the process' access list
+ * won't exceed the IOVM limit */
+ if (h->heap_pgalloc && !h->pgalloc.contig && !client->super) {
+ int oc;
+ oc = atomic_add_return(h->size, &client->iovm_commit);
+ if (oc > client->iovm_limit) {
+ atomic_sub(h->size, &client->iovm_commit);
+ nvmap_handle_put(h);
+ nvmap_err(client, "duplicating %p in %s over-commits"
+ " IOVMM space\n", (void *)id,
+ current->group_leader->comm);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref) {
+ nvmap_handle_put(h);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&ref->dupes, 1);
+ ref->handle = h;
+ atomic_set(&ref->pin, 0);
+ add_handle_ref(client, ref);
+ return ref;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.c b/drivers/video/tegra/nvmap/nvmap_heap.c
new file mode 100644
index 000000000000..2449b00f7d52
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_heap.c
@@ -0,0 +1,807 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_heap.c
+ *
+ * GPU heap allocator.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <mach/nvmap.h>
+
+#include "nvmap_heap.h"
+
+/*
+ * "carveouts" are platform-defined regions of physically contiguous memory
+ * which are not managed by the OS. a platform may specify multiple carveouts,
+ * for either small special-purpose memory regions (like IRAM on Tegra SoCs)
+ * or reserved regions of main system memory.
+ *
+ * the carveout allocator returns allocations which are physically contiguous.
+ * to reduce external fragmentation, the allocation algorithm implemented in
+ * this file employs 3 strategies for keeping allocations of similar size
+ * grouped together inside the larger heap: the "small", "normal" and "huge"
+ * strategies. the size thresholds (in bytes) for determining which strategy
+ * to employ should be provided by the platform for each heap. it is possible
+ * for a platform to define a heap where only the "normal" strategy is used.
+ *
+ * o "normal" allocations use an address-order first-fit allocator (called
+ * BOTTOM_UP in the code below). each allocation is rounded up to be
+ * an integer multiple of the "small" allocation size.
+ *
+ * o "huge" allocations use an address-order last-fit allocator (called
+ * TOP_DOWN in the code below). like "normal" allocations, each allocation
+ * is rounded up to be an integer multiple of the "small" allocation size.
+ *
+ * o "small" allocations are treatedy differently: the heap manager maintains
+ * a pool of "small"-sized blocks internally from which allocations less
+ * than 1/2 of the "small" size are buddy-allocated. if a "small" allocation
+ * is requested and none of the buddy sub-heaps is able to service it,
+ * the heap manager will try to allocate a new buddy-heap.
+ *
+ * this allocator is intended to keep "splinters" colocated in the carveout,
+ * and to ensure that the minimum free block size in the carveout (i.e., the
+ * "small" threshold) is still a meaningful size.
+ *
+ */
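+
+/*
+ * illustrative example (sizes are made up, not platform data): with a 64 KiB
+ * buddy ("small") block size, requests of 32 KiB or less are buddy-allocated
+ * out of shared 64 KiB blocks, a 128 KiB request is placed bottom-up at the
+ * lowest suitable free address, and a 16 MiB request is placed top-down
+ * against the end of the carveout, so small and large blocks grow toward
+ * each other from opposite ends and sub-page splinters stay clustered.
+ */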
+
+#define MAX_BUDDY_NR 128 /* maximum buddies in a buddy allocator */
+
+enum direction {
+ TOP_DOWN,
+ BOTTOM_UP
+};
+
+enum block_type {
+ BLOCK_FIRST_FIT, /* block was allocated directly from the heap */
+ BLOCK_BUDDY, /* block was allocated from a buddy sub-heap */
+};
+
+struct heap_stat {
+ size_t free; /* total free size */
+ size_t free_largest; /* largest free block */
+ size_t free_count; /* number of free blocks */
+ size_t total; /* total size */
+ size_t largest; /* largest unique block */
+ size_t count; /* total number of blocks */
+};
+
+struct buddy_heap;
+
+struct buddy_block {
+ struct nvmap_heap_block block;
+ struct buddy_heap *heap;
+};
+
+struct list_block {
+ struct nvmap_heap_block block;
+ struct list_head all_list;
+ unsigned int mem_prot;
+ unsigned long orig_addr;
+ size_t size;
+ struct nvmap_heap *heap;
+ struct list_head free_list;
+};
+
+struct combo_block {
+ union {
+ struct list_block lb;
+ struct buddy_block bb;
+ };
+};
+
+struct buddy_bits {
+ unsigned int alloc:1;
+ unsigned int order:7; /* log2(MAX_BUDDY_NR); */
+};
+
+struct buddy_heap {
+ struct list_block *heap_base;
+ unsigned int nr_buddies;
+ struct list_head buddy_list;
+ struct buddy_bits bitmap[MAX_BUDDY_NR];
+};
+
+struct nvmap_heap {
+ struct list_head all_list;
+ struct list_head free_list;
+ struct mutex lock;
+ struct list_head buddy_list;
+ unsigned int min_buddy_shift;
+ unsigned int buddy_heap_size;
+ unsigned int small_alloc;
+ const char *name;
+ void *arg;
+ struct device dev;
+};
+
+static struct kmem_cache *buddy_heap_cache;
+static struct kmem_cache *block_cache;
+
+static inline struct nvmap_heap *parent_of(struct buddy_heap *heap)
+{
+ return heap->heap_base->heap;
+}
+
+static inline unsigned int order_of(size_t len, size_t min_shift)
+{
+ len = 2 * DIV_ROUND_UP(len, (1 << min_shift)) - 1;
+ return fls(len)-1;
+}
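+
+/* e.g. (illustrative): with min_shift == 12 (4 KiB minimum buddies),
+ * order_of() returns 0 for lengths up to 4 KiB, 1 for 4 KiB + 1 .. 8 KiB and
+ * 2 for 8 KiB + 1 .. 16 KiB, i.e. the ceiling of log2 of the length measured
+ * in minimum-buddy units. */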
+
+/* returns the free size in bytes of the buddy heap; must be called while
+ * holding the parent heap's lock. */
+static void buddy_stat(struct buddy_heap *heap, struct heap_stat *stat)
+{
+ unsigned int index;
+ unsigned int shift = parent_of(heap)->min_buddy_shift;
+
+ for (index = 0; index < heap->nr_buddies;
+ index += (1 << heap->bitmap[index].order)) {
+ size_t curr = 1 << (heap->bitmap[index].order + shift);
+
+ stat->largest = max(stat->largest, curr);
+ stat->total += curr;
+ stat->count++;
+
+ if (!heap->bitmap[index].alloc) {
+ stat->free += curr;
+ stat->free_largest = max(stat->free_largest, curr);
+ stat->free_count++;
+ }
+ }
+}
+
+/* returns the free size of the heap (including any free blocks in any
+ * buddy-heap suballocators; must be called while holding the parent
+ * heap's lock. */
+static unsigned long heap_stat(struct nvmap_heap *heap, struct heap_stat *stat)
+{
+ struct buddy_heap *bh;
+ struct list_block *l = NULL;
+ unsigned long base = -1ul;
+
+ memset(stat, 0, sizeof(*stat));
+ mutex_lock(&heap->lock);
+ list_for_each_entry(l, &heap->all_list, all_list) {
+ stat->total += l->size;
+ stat->largest = max(l->size, stat->largest);
+ stat->count++;
+ base = min(base, l->orig_addr);
+ }
+
+ list_for_each_entry(bh, &heap->buddy_list, buddy_list) {
+ buddy_stat(bh, stat);
+ /* the total counts are double-counted for buddy heaps
+ * since the blocks allocated for buddy heaps exist in the
+ * all_list; subtract out the doubly-added stats */
+ stat->total -= bh->heap_base->size;
+ stat->count--;
+ }
+
+ list_for_each_entry(l, &heap->free_list, free_list) {
+ stat->free += l->size;
+ stat->free_count++;
+ stat->free_largest = max(l->size, stat->free_largest);
+ }
+ mutex_unlock(&heap->lock);
+
+ return base;
+}
+
+static ssize_t heap_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t heap_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static struct device_attribute heap_stat_total_max =
+ __ATTR(total_max, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_total_count =
+ __ATTR(total_count, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_total_size =
+ __ATTR(total_size, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_max =
+ __ATTR(free_max, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_count =
+ __ATTR(free_count, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_size =
+ __ATTR(free_size, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_base =
+ __ATTR(base, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_attr_name =
+ __ATTR(name, S_IRUGO, heap_name_show, NULL);
+
+static struct attribute *heap_stat_attrs[] = {
+ &heap_stat_total_max.attr,
+ &heap_stat_total_count.attr,
+ &heap_stat_total_size.attr,
+ &heap_stat_free_max.attr,
+ &heap_stat_free_count.attr,
+ &heap_stat_free_size.attr,
+ &heap_stat_base.attr,
+ &heap_attr_name.attr,
+ NULL,
+};
+
+static struct attribute_group heap_stat_attr_group = {
+ .attrs = heap_stat_attrs,
+};
+
+static ssize_t heap_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+ return sprintf(buf, "%s\n", heap->name);
+}
+
+static ssize_t heap_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+ struct heap_stat stat;
+ unsigned long base;
+
+ base = heap_stat(heap, &stat);
+
+	if (attr == &heap_stat_total_max)
+		return sprintf(buf, "%zu\n", stat.largest);
+	else if (attr == &heap_stat_total_count)
+		return sprintf(buf, "%zu\n", stat.count);
+	else if (attr == &heap_stat_total_size)
+		return sprintf(buf, "%zu\n", stat.total);
+	else if (attr == &heap_stat_free_max)
+		return sprintf(buf, "%zu\n", stat.free_largest);
+	else if (attr == &heap_stat_free_count)
+		return sprintf(buf, "%zu\n", stat.free_count);
+	else if (attr == &heap_stat_free_size)
+		return sprintf(buf, "%zu\n", stat.free);
+	else if (attr == &heap_stat_base)
+		return sprintf(buf, "%08lx\n", base);
+	else
+		return -EINVAL;
+}
+
+static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
+ size_t size, size_t align,
+ unsigned int mem_prot)
+{
+ unsigned int index = 0;
+ unsigned int min_shift = parent_of(heap)->min_buddy_shift;
+ unsigned int order = order_of(size, min_shift);
+ unsigned int align_mask;
+ unsigned int best = heap->nr_buddies;
+ struct buddy_block *b;
+
+ if (heap->heap_base->mem_prot != mem_prot)
+ return NULL;
+
+ align = max(align, (size_t)(1 << min_shift));
+ align_mask = (align >> min_shift) - 1;
+
+ for (index = 0; index < heap->nr_buddies;
+ index += (1 << heap->bitmap[index].order)) {
+
+ if (heap->bitmap[index].alloc || (index & align_mask) ||
+ (heap->bitmap[index].order < order))
+ continue;
+
+ if (best == heap->nr_buddies ||
+ heap->bitmap[index].order < heap->bitmap[best].order)
+ best = index;
+
+ if (heap->bitmap[best].order == order)
+ break;
+ }
+
+ if (best == heap->nr_buddies)
+ return NULL;
+
+ b = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!b)
+ return NULL;
+
+ while (heap->bitmap[best].order != order) {
+ unsigned int buddy;
+ heap->bitmap[best].order--;
+ buddy = best ^ (1 << heap->bitmap[best].order);
+ heap->bitmap[buddy].order = heap->bitmap[best].order;
+ heap->bitmap[buddy].alloc = 0;
+ }
+ heap->bitmap[best].alloc = 1;
+ b->block.base = heap->heap_base->block.base + (best << min_shift);
+ b->heap = heap;
+ b->block.type = BLOCK_BUDDY;
+ return &b->block;
+}
+
+static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
+{
+ struct buddy_block *b = container_of(block, struct buddy_block, block);
+ struct buddy_heap *h = b->heap;
+ unsigned int min_shift = parent_of(h)->min_buddy_shift;
+ unsigned int index;
+
+ index = (block->base - h->heap_base->block.base) >> min_shift;
+ h->bitmap[index].alloc = 0;
+
+ for (;;) {
+ unsigned int buddy = index ^ (1 << h->bitmap[index].order);
+ if (buddy >= h->nr_buddies || h->bitmap[buddy].alloc ||
+ h->bitmap[buddy].order != h->bitmap[index].order)
+ break;
+
+ h->bitmap[buddy].order++;
+ h->bitmap[index].order++;
+ index = min(buddy, index);
+ }
+
+ kmem_cache_free(block_cache, b);
+ if ((1 << h->bitmap[0].order) == h->nr_buddies)
+ return h;
+
+ return NULL;
+}
+
+static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
+ size_t len, size_t align,
+ unsigned int mem_prot)
+{
+ struct list_block *b = NULL;
+ struct list_block *i = NULL;
+ struct list_block *rem = NULL;
+ unsigned long fix_base;
+ enum direction dir;
+
+ /* since pages are only mappable with one cache attribute,
+ * and most allocations from carveout heaps are DMA coherent
+ * (i.e., non-cacheable), round cacheable allocations up to
+ * a page boundary to ensure that the physical pages will
+ * only be mapped one way. */
+ if (mem_prot == NVMAP_HANDLE_CACHEABLE ||
+ mem_prot == NVMAP_HANDLE_INNER_CACHEABLE) {
+ align = max_t(size_t, align, PAGE_SIZE);
+ len = PAGE_ALIGN(len);
+ }
+
+ dir = (len <= heap->small_alloc) ? BOTTOM_UP : TOP_DOWN;
+
+ if (dir == BOTTOM_UP) {
+ list_for_each_entry(i, &heap->free_list, free_list) {
+ size_t fix_size;
+ fix_base = ALIGN(i->block.base, align);
+ fix_size = i->size - (fix_base - i->block.base);
+
+ if (fix_size >= len) {
+ b = i;
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry_reverse(i, &heap->free_list, free_list) {
+ if (i->size >= len) {
+ fix_base = i->block.base + i->size - len;
+ fix_base &= ~(align-1);
+ if (fix_base >= i->block.base) {
+ b = i;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!b)
+ return NULL;
+
+ if (b->block.base != fix_base) {
+ rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!rem) {
+ b->orig_addr = b->block.base;
+ b->block.base = fix_base;
+ b->size -= (b->block.base - b->orig_addr);
+ goto out;
+ }
+
+ rem->block.type = BLOCK_FIRST_FIT;
+ rem->block.base = b->block.base;
+ rem->orig_addr = rem->block.base;
+ rem->size = fix_base - rem->block.base;
+ b->block.base = fix_base;
+ b->orig_addr = fix_base;
+ b->size -= rem->size;
+ list_add_tail(&rem->all_list, &heap->all_list);
+ list_add_tail(&rem->free_list, &b->free_list);
+ }
+
+ b->orig_addr = b->block.base;
+
+ if (b->size > len) {
+ rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!rem)
+ goto out;
+
+ rem->block.type = BLOCK_FIRST_FIT;
+ rem->block.base = b->block.base + len;
+ rem->size = b->size - len;
+ BUG_ON(rem->size > b->size);
+ rem->orig_addr = rem->block.base;
+ b->size = len;
+ list_add_tail(&rem->all_list, &heap->all_list);
+ list_add(&rem->free_list, &b->free_list);
+ }
+
+out:
+ list_del(&b->free_list);
+ b->heap = heap;
+ b->mem_prot = mem_prot;
+ return &b->block;
+}
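+
+/* e.g. (illustrative addresses only): a bottom-up request for 1 MiB aligned
+ * to 1 MiB, satisfied from a free block starting at 0x18c10000, splits off a
+ * 960 KiB leading remainder at 0x18c10000, returns the aligned block at
+ * 0x18d00000, and leaves anything beyond 0x18e00000 as a trailing free
+ * remainder. */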
+
+#ifdef DEBUG_FREE_LIST
+static void freelist_debug(struct nvmap_heap *heap, const char *title,
+ struct list_block *token)
+{
+ int i;
+ struct list_block *n;
+
+	dev_dbg(&heap->dev, "%s\n", title);
+	i = 0;
+	list_for_each_entry(n, &heap->free_list, free_list) {
+		dev_dbg(&heap->dev, "\t%d [%p..%p]%s\n", i,
+			(void *)n->orig_addr,
+			(void *)(n->orig_addr + n->size),
+			(n == token) ? "<--" : "");
+ i++;
+ }
+}
+#else
+#define freelist_debug(_heap, _title, _token) do { } while (0)
+#endif
+
+static void do_heap_free(struct nvmap_heap_block *block)
+{
+ struct list_block *b = container_of(block, struct list_block, block);
+ struct list_block *n = NULL;
+ struct nvmap_heap *heap = b->heap;
+
+ BUG_ON(b->block.base > b->orig_addr);
+ b->size += (b->block.base - b->orig_addr);
+ b->block.base = b->orig_addr;
+
+ freelist_debug(heap, "free list before", b);
+
+ list_for_each_entry(n, &heap->free_list, free_list) {
+ if (n->block.base > b->block.base)
+ break;
+ }
+
+ list_add_tail(&b->free_list, &n->free_list);
+ BUG_ON(list_empty(&b->all_list));
+
+ freelist_debug(heap, "free list pre-merge", b);
+
+ if (!list_is_last(&b->free_list, &heap->free_list)) {
+ n = list_first_entry(&b->free_list, struct list_block, free_list);
+ if (n->block.base == b->block.base + b->size) {
+ list_del(&n->all_list);
+ list_del(&n->free_list);
+ BUG_ON(b->orig_addr >= n->orig_addr);
+ b->size += n->size;
+ kmem_cache_free(block_cache, n);
+ }
+ }
+
+ if (b->free_list.prev != &heap->free_list) {
+ n = list_entry(b->free_list.prev, struct list_block, free_list);
+ if (n->block.base + n->size == b->block.base) {
+ list_del(&b->all_list);
+ list_del(&b->free_list);
+ BUG_ON(n->orig_addr >= b->orig_addr);
+ n->size += b->size;
+ kmem_cache_free(block_cache, b);
+ }
+ }
+
+ freelist_debug(heap, "free list after", b);
+}
+
+static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
+ size_t len, size_t align,
+ unsigned int mem_prot)
+{
+ struct buddy_heap *bh;
+ struct nvmap_heap_block *b = NULL;
+
+ list_for_each_entry(bh, &h->buddy_list, buddy_list) {
+ b = buddy_alloc(bh, len, align, mem_prot);
+ if (b)
+ return b;
+ }
+
+ /* no buddy heaps could service this allocation: try to create a new
+ * buddy heap instead */
+ bh = kmem_cache_zalloc(buddy_heap_cache, GFP_KERNEL);
+ if (!bh)
+ return NULL;
+
+ b = do_heap_alloc(h, h->buddy_heap_size, h->buddy_heap_size, mem_prot);
+ if (!b) {
+ kmem_cache_free(buddy_heap_cache, bh);
+ return NULL;
+ }
+
+ bh->heap_base = container_of(b, struct list_block, block);
+ bh->nr_buddies = h->buddy_heap_size >> h->min_buddy_shift;
+ bh->bitmap[0].alloc = 0;
+ bh->bitmap[0].order = order_of(h->buddy_heap_size, h->min_buddy_shift);
+ list_add_tail(&bh->buddy_list, &h->buddy_list);
+ return buddy_alloc(bh, len, align, mem_prot);
+}
+
+/* nvmap_heap_alloc: allocates a block of memory of len bytes, aligned to
+ * align bytes. */
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h, size_t len,
+ size_t align, unsigned int prot)
+{
+ struct nvmap_heap_block *b;
+
+ mutex_lock(&h->lock);
+ if (len <= h->buddy_heap_size / 2) {
+ b = do_buddy_alloc(h, len, align, prot);
+ } else {
+ if (h->buddy_heap_size)
+ len = ALIGN(len, h->buddy_heap_size);
+ align = max(align, (size_t)L1_CACHE_BYTES);
+ b = do_heap_alloc(h, len, align, prot);
+ }
+ mutex_unlock(&h->lock);
+ return b;
+}
+
+/* nvmap_heap_free: frees block b */
+void nvmap_heap_free(struct nvmap_heap_block *b)
+{
+ struct buddy_heap *bh = NULL;
+ struct nvmap_heap *h;
+
+ if (b->type == BLOCK_BUDDY) {
+ struct buddy_block *bb;
+ bb = container_of(b, struct buddy_block, block);
+ h = bb->heap->heap_base->heap;
+ } else {
+ struct list_block *lb;
+ lb = container_of(b, struct list_block, block);
+ h = lb->heap;
+ }
+
+ mutex_lock(&h->lock);
+ if (b->type == BLOCK_BUDDY)
+ bh = do_buddy_free(b);
+ else
+ do_heap_free(b);
+
+ if (bh) {
+ list_del(&bh->buddy_list);
+ mutex_unlock(&h->lock);
+ nvmap_heap_free(&bh->heap_base->block);
+ kmem_cache_free(buddy_heap_cache, bh);
+ } else
+ mutex_unlock(&h->lock);
+}
+
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
+{
+ if (b->type == BLOCK_BUDDY) {
+ struct buddy_block *bb;
+ bb = container_of(b, struct buddy_block, block);
+ return parent_of(bb->heap);
+ } else {
+ struct list_block *lb;
+ lb = container_of(b, struct list_block, block);
+ return lb->heap;
+ }
+}
+
+static void heap_release(struct device *heap)
+{
+}
+
+/* nvmap_heap_create: create a heap object of len bytes, starting from
+ * address base.
+ *
+ * if buddy_size is >= NVMAP_HEAP_MIN_BUDDY_SIZE, then allocations <= 1/2
+ * of the buddy heap size will use a buddy sub-allocator, where each buddy
+ * heap is buddy_size bytes (should be a power of 2). all other allocations
+ * will be rounded up to be a multiple of buddy_size bytes.
+ */
+struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
+ unsigned long base, size_t len,
+ size_t buddy_size, void *arg)
+{
+ struct nvmap_heap *h = NULL;
+ struct list_block *l = NULL;
+
+	if (WARN_ON(buddy_size && buddy_size < NVMAP_HEAP_MIN_BUDDY_SIZE)) {
+		dev_warn(parent, "%s: buddy_size %zu too small\n", __func__,
+			buddy_size);
+		buddy_size = 0;
+	} else if (WARN_ON(buddy_size >= len)) {
+		dev_warn(parent, "%s: buddy_size %zu too large\n", __func__,
+			buddy_size);
+		buddy_size = 0;
+	} else if (WARN_ON(buddy_size & (buddy_size - 1))) {
+		dev_warn(parent, "%s: buddy_size %zu not a power of 2\n",
+			__func__, buddy_size);
+		buddy_size = 1 << (ilog2(buddy_size) + 1);
+	}
+
+	if (WARN_ON(buddy_size && (base & (buddy_size - 1)))) {
+		unsigned long orig = base;
+		dev_warn(parent, "%s: base address %p not aligned to "
+			"buddy_size %zu\n", __func__, (void *)base, buddy_size);
+		base = ALIGN(base, buddy_size);
+		len -= (base - orig);
+	}
+
+	if (WARN_ON(buddy_size && (len & (buddy_size - 1)))) {
+		dev_warn(parent, "%s: length %zu not aligned to "
+			"buddy_size %zu\n", __func__, len, buddy_size);
+		len &= ~(buddy_size - 1);
+	}
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h) {
+ dev_err(parent, "%s: out of memory\n", __func__);
+ goto fail_alloc;
+ }
+
+ l = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!l) {
+ dev_err(parent, "%s: out of memory\n", __func__);
+ goto fail_alloc;
+ }
+
+ dev_set_name(&h->dev, "heap-%s", name);
+ h->name = name;
+ h->arg = arg;
+ h->dev.parent = parent;
+ h->dev.driver = NULL;
+ h->dev.release = heap_release;
+ if (device_register(&h->dev)) {
+ dev_err(parent, "%s: failed to register %s\n", __func__,
+ dev_name(&h->dev));
+ goto fail_alloc;
+ }
+ if (sysfs_create_group(&h->dev.kobj, &heap_stat_attr_group)) {
+ dev_err(&h->dev, "%s: failed to create attributes\n", __func__);
+ goto fail_register;
+ }
+ h->small_alloc = max(2 * buddy_size, len / 256);
+ h->buddy_heap_size = buddy_size;
+ if (buddy_size)
+ h->min_buddy_shift = ilog2(buddy_size / MAX_BUDDY_NR);
+ INIT_LIST_HEAD(&h->free_list);
+ INIT_LIST_HEAD(&h->buddy_list);
+ INIT_LIST_HEAD(&h->all_list);
+ mutex_init(&h->lock);
+ l->block.base = base;
+ l->block.type = BLOCK_FIRST_FIT;
+ l->size = len;
+ l->orig_addr = base;
+ list_add_tail(&l->free_list, &h->free_list);
+ list_add_tail(&l->all_list, &h->all_list);
+ return h;
+
+fail_register:
+ device_unregister(&h->dev);
+fail_alloc:
+ if (l)
+ kmem_cache_free(block_cache, l);
+ kfree(h);
+ return NULL;
+}
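+
+/*
+ * usage sketch (illustrative only: the base address, size, buddy size and
+ * heap name below are invented, not taken from any platform data):
+ *
+ *	struct nvmap_heap *heap;
+ *	struct nvmap_heap_block *blk;
+ *
+ *	heap = nvmap_heap_create(parent_dev, "generic-0", 0x18c00000,
+ *				 32 << 20, SZ_32K, NULL);
+ *	blk = nvmap_heap_alloc(heap, SZ_1M, PAGE_SIZE,
+ *			       NVMAP_HANDLE_WRITE_COMBINE);
+ *	...
+ *	nvmap_heap_free(blk);
+ *	nvmap_heap_destroy(heap);
+ */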
+
+void *nvmap_heap_device_to_arg(struct device *dev)
+{
+ struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+ return heap->arg;
+}
+
+/* nvmap_heap_destroy: frees all resources in heap */
+void nvmap_heap_destroy(struct nvmap_heap *heap)
+{
+ WARN_ON(!list_empty(&heap->buddy_list));
+
+ sysfs_remove_group(&heap->dev.kobj, &heap_stat_attr_group);
+ device_unregister(&heap->dev);
+
+ while (!list_empty(&heap->buddy_list)) {
+ struct buddy_heap *b;
+ b = list_first_entry(&heap->buddy_list, struct buddy_heap,
+ buddy_list);
+		list_del(&b->buddy_list);
+ nvmap_heap_free(&b->heap_base->block);
+ kmem_cache_free(buddy_heap_cache, b);
+ }
+
+ WARN_ON(!list_is_singular(&heap->all_list));
+ while (!list_empty(&heap->all_list)) {
+ struct list_block *l;
+ l = list_first_entry(&heap->all_list, struct list_block,
+ all_list);
+ list_del(&l->all_list);
+ kmem_cache_free(block_cache, l);
+ }
+
+ kfree(heap);
+}
+
+/* nvmap_heap_create_group: adds the attribute_group grp to the heap kobject */
+int nvmap_heap_create_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp)
+{
+ return sysfs_create_group(&heap->dev.kobj, grp);
+}
+
+/* nvmap_heap_remove_group: removes the attribute_group grp */
+void nvmap_heap_remove_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp)
+{
+ sysfs_remove_group(&heap->dev.kobj, grp);
+}
+
+int nvmap_heap_init(void)
+{
+ BUG_ON(buddy_heap_cache != NULL);
+ buddy_heap_cache = KMEM_CACHE(buddy_heap, 0);
+ if (!buddy_heap_cache) {
+ pr_err("%s: unable to create buddy heap cache\n", __func__);
+ return -ENOMEM;
+ }
+
+ block_cache = KMEM_CACHE(combo_block, 0);
+ if (!block_cache) {
+ kmem_cache_destroy(buddy_heap_cache);
+ pr_err("%s: unable to create block cache\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void nvmap_heap_deinit(void)
+{
+ if (buddy_heap_cache)
+ kmem_cache_destroy(buddy_heap_cache);
+ if (block_cache)
+ kmem_cache_destroy(block_cache);
+
+ block_cache = NULL;
+ buddy_heap_cache = NULL;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.h b/drivers/video/tegra/nvmap/nvmap_heap.h
new file mode 100644
index 000000000000..66854f26b353
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_heap.h
@@ -0,0 +1,62 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_heap.h
+ *
+ * GPU heap allocator.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVMAP_HEAP_H
+#define __NVMAP_HEAP_H
+
+struct device;
+struct nvmap_heap;
+struct attribute_group;
+
+struct nvmap_heap_block {
+ unsigned long base;
+ unsigned int type;
+};
+
+#define NVMAP_HEAP_MIN_BUDDY_SIZE 8192
+
+struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
+ unsigned long base, size_t len,
+				     size_t buddy_size, void *arg);
+
+void nvmap_heap_destroy(struct nvmap_heap *heap);
+
+void *nvmap_heap_device_to_arg(struct device *dev);
+
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap, size_t len,
+ size_t align, unsigned int prot);
+
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b);
+
+void nvmap_heap_free(struct nvmap_heap_block *block);
+
+int nvmap_heap_create_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp);
+
+void nvmap_heap_remove_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp);
+
+int __init nvmap_heap_init(void);
+
+void nvmap_heap_deinit(void);
+
+#endif
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c
new file mode 100644
index 000000000000..9051803aa68d
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.c
@@ -0,0 +1,629 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_ioctl.c
+ *
+ * User-space interface to nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap_ioctl.h"
+#include "nvmap.h"
+
+static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
+ int is_read, unsigned long h_offs,
+ unsigned long sys_addr, unsigned long h_stride,
+ unsigned long sys_stride, unsigned long elem_size,
+ unsigned long count);
+
+static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+ unsigned long start, unsigned long end, unsigned int op);
+
+
+int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
+{
+ struct nvmap_pin_handle op;
+ struct nvmap_handle *h;
+ unsigned long on_stack[16];
+ unsigned long *refs;
+ unsigned long __user *output;
+ unsigned int i;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.count)
+ return -EINVAL;
+
+ if (op.count > 1) {
+		size_t bytes = op.count * sizeof(*refs);
+
+ if (op.count > ARRAY_SIZE(on_stack))
+ refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
+ else
+ refs = on_stack;
+
+ if (!refs)
+ return -ENOMEM;
+
+ if (copy_from_user(refs, (void *)op.handles, bytes)) {
+ err = -EFAULT;
+ goto out;
+ }
+ } else {
+ refs = on_stack;
+ on_stack[0] = (unsigned long)op.handles;
+ }
+
+ if (is_pin)
+ err = nvmap_pin_ids(filp->private_data, op.count, refs);
+ else
+ nvmap_unpin_ids(filp->private_data, op.count, refs);
+
+ /* skip the output stage on unpin */
+ if (err || !is_pin)
+ goto out;
+
+ /* it is guaranteed that if nvmap_pin_ids returns 0 that
+ * all of the handle_ref objects are valid, so dereferencing
+ * directly here is safe */
+ if (op.count > 1)
+ output = (unsigned long __user *)op.addr;
+ else {
+ struct nvmap_pin_handle __user *tmp = arg;
+ output = (unsigned long __user *)&(tmp->addr);
+ }
+
+ if (!output)
+ goto out;
+
+ for (i = 0; i < op.count && !err; i++) {
+ unsigned long addr;
+
+ h = (struct nvmap_handle *)refs[i];
+
+ if (h->heap_pgalloc && h->pgalloc.contig)
+ addr = page_to_phys(h->pgalloc.pages[0]);
+ else if (h->heap_pgalloc)
+ addr = h->pgalloc.area->iovm_start;
+ else
+ addr = h->carveout->base;
+
+ err = put_user(addr, &output[i]);
+ }
+
+ if (err)
+ nvmap_unpin_ids(filp->private_data, op.count, refs);
+
+out:
+ if (refs != on_stack)
+ kfree(refs);
+
+ return err;
+}
+
+int nvmap_ioctl_getid(struct file *filp, void __user *arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_create_handle op;
+ struct nvmap_handle *h = NULL;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle)
+ return -EINVAL;
+
+ h = nvmap_get_handle_id(client, op.handle);
+
+ if (!h)
+ return -EPERM;
+
+ op.id = (__u32)h;
+ if (client == h->owner)
+ h->global = true;
+
+ nvmap_handle_put(h);
+
+ return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
+}
+
+int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
+{
+ struct nvmap_alloc_handle op;
+ struct nvmap_client *client = filp->private_data;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle)
+ return -EINVAL;
+
+ if (op.align & (op.align - 1))
+ return -EINVAL;
+
+ /* user-space handles are aligned to page boundaries, to prevent
+ * data leakage. */
+ op.align = max_t(size_t, op.align, PAGE_SIZE);
+
+ return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
+ op.align, op.flags);
+}
+
+int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
+{
+ struct nvmap_create_handle op;
+ struct nvmap_handle_ref *ref = NULL;
+ struct nvmap_client *client = filp->private_data;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!client)
+ return -ENODEV;
+
+ if (cmd == NVMAP_IOC_CREATE) {
+ ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
+ if (!IS_ERR(ref))
+ ref->handle->orig_size = op.size;
+ } else if (cmd == NVMAP_IOC_FROM_ID) {
+ ref = nvmap_duplicate_handle_id(client, op.id);
+ } else {
+ return -EINVAL;
+ }
+
+ if (IS_ERR(ref))
+ return PTR_ERR(ref);
+
+ op.handle = nvmap_ref_to_id(ref);
+ if (copy_to_user(arg, &op, sizeof(op))) {
+ err = -EFAULT;
+ nvmap_free_handle_id(client, op.handle);
+ }
+
+ return err;
+}
+
+int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_map_caller op;
+ struct nvmap_vma_priv *vpriv;
+ struct vm_area_struct *vma;
+ struct nvmap_handle *h = NULL;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle)
+ return -EINVAL;
+
+ h = nvmap_get_handle_id(client, op.handle);
+
+ if (!h)
+ return -EPERM;
+
+ down_read(&current->mm->mmap_sem);
+
+ vma = find_vma(current->mm, op.addr);
+ if (!vma || !vma->vm_private_data) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (op.offset & ~PAGE_MASK) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ if ((op.offset + op.length) > h->size) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ vpriv = vma->vm_private_data;
+ BUG_ON(!vpriv);
+
+ /* the VMA must exactly match the requested mapping operation, and the
+	 * VMA that is targeted must have been created by this driver
+ */
+ if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
+ (vma->vm_end-vma->vm_start != op.length)) {
+ err = -EPERM;
+ goto out;
+ }
+
+ /* verify that each mmap() system call creates a unique VMA */
+
+ if (vpriv->handle && (h == vpriv->handle)) {
+ goto out;
+ } else if (vpriv->handle) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ vpriv->handle = h;
+ vpriv->offs = op.offset;
+
+ vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
+
+out:
+ up_read(&current->mm->mmap_sem);
+ if (err)
+ nvmap_handle_put(h);
+ return err;
+}
+
+int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
+{
+ struct nvmap_handle_param op;
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_handle *h;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ h = nvmap_get_handle_id(client, op.handle);
+ if (!h)
+ return -EINVAL;
+
+ switch (op.param) {
+ case NVMAP_HANDLE_PARAM_SIZE:
+ op.result = h->orig_size;
+ break;
+ case NVMAP_HANDLE_PARAM_ALIGNMENT:
+ if (!h->alloc)
+ op.result = 0;
+ else if (h->heap_pgalloc)
+ op.result = PAGE_SIZE;
+ else if (h->carveout->base)
+ op.result = (h->carveout->base & -h->carveout->base);
+ else
+ op.result = SZ_4M;
+ break;
+ case NVMAP_HANDLE_PARAM_BASE:
+ if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
+ op.result = -1ul;
+ else if (!h->heap_pgalloc)
+ op.result = h->carveout->base;
+ else if (h->pgalloc.contig)
+ op.result = page_to_phys(h->pgalloc.pages[0]);
+ else if (h->pgalloc.area)
+ op.result = h->pgalloc.area->iovm_start;
+ else
+ op.result = -1ul;
+ break;
+ case NVMAP_HANDLE_PARAM_HEAP:
+ if (!h->alloc)
+ op.result = 0;
+ else if (!h->heap_pgalloc)
+ op.result = nvmap_carveout_usage(client, h->carveout);
+ else if (h->pgalloc.contig)
+ op.result = NVMAP_HEAP_SYSMEM;
+ else
+ op.result = NVMAP_HEAP_IOVMM;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (!err && copy_to_user(arg, &op, sizeof(op)))
+ err = -EFAULT;
+
+ nvmap_handle_put(h);
+ return err;
+}
+
+int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_rw_handle __user *uarg = arg;
+ struct nvmap_rw_handle op;
+ struct nvmap_handle *h;
+ ssize_t copied;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle || !op.addr || !op.count || !op.elem_size)
+ return -EINVAL;
+
+ h = nvmap_get_handle_id(client, op.handle);
+ if (!h)
+ return -EPERM;
+
+ copied = rw_handle(client, h, is_read, op.offset,
+ (unsigned long)op.addr, op.hmem_stride,
+ op.user_stride, op.elem_size, op.count);
+
+ if (copied < 0) {
+ err = copied;
+ copied = 0;
+ } else if (copied < (op.count * op.elem_size))
+ err = -EINTR;
+
+ __put_user(copied, &uarg->count);
+
+ nvmap_handle_put(h);
+
+ return err;
+}
+
+int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_cache_op op;
+ struct vm_area_struct *vma;
+ struct nvmap_vma_priv *vpriv;
+ unsigned long start;
+ unsigned long end;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
+ op.op > NVMAP_CACHE_OP_WB_INV)
+ return -EINVAL;
+
+ down_read(&current->mm->mmap_sem);
+
+	vma = find_vma(current->mm, (unsigned long)op.addr);
+ if (!vma || !is_nvmap_vma(vma) ||
+ (unsigned long)op.addr + op.len > vma->vm_end) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;
+
+ if ((unsigned long)vpriv->handle != op.handle) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ start = (unsigned long)op.addr - vma->vm_start;
+ end = start + op.len;
+
+ err = cache_maint(client, vpriv->handle, start, end, op.op);
+out:
+ up_read(&current->mm->mmap_sem);
+ return err;
+}
+
+int nvmap_ioctl_free(struct file *filp, unsigned long arg)
+{
+ struct nvmap_client *client = filp->private_data;
+
+ if (!arg)
+ return 0;
+
+ nvmap_free_handle_id(client, arg);
+ return 0;
+}
+
+static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+ unsigned long start, unsigned long end, unsigned int op)
+{
+ enum dma_data_direction dir;
+ pgprot_t prot;
+ pte_t **pte = NULL;
+ unsigned long kaddr;
+ unsigned long loop;
+ int err = 0;
+
+ h = nvmap_handle_get(h);
+ if (!h)
+ return -EFAULT;
+
+ if (!h->alloc) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
+ h->flags == NVMAP_HANDLE_WRITE_COMBINE)
+ goto out;
+
+ if (WARN_ON_ONCE(op == NVMAP_CACHE_OP_WB_INV))
+ dir = DMA_BIDIRECTIONAL;
+ else if (op == NVMAP_CACHE_OP_WB)
+ dir = DMA_TO_DEVICE;
+ else
+ dir = DMA_FROM_DEVICE;
+
+ if (h->heap_pgalloc) {
+ while (start < end) {
+ unsigned long next = (start + PAGE_SIZE) & PAGE_MASK;
+ struct page *page;
+
+ page = h->pgalloc.pages[start >> PAGE_SHIFT];
+ next = min(next, end);
+ __dma_page_cpu_to_dev(page, start & ~PAGE_MASK,
+ next - start, dir);
+ start = next;
+ }
+ goto out;
+ }
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+ pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
+ if (IS_ERR(pte)) {
+ err = PTR_ERR(pte);
+ pte = NULL;
+ goto out;
+ }
+
+	if (start > h->size || end > h->size) {
+		nvmap_warn(client, "cache maintenance outside handle\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+ start += h->carveout->base;
+ end += h->carveout->base;
+
+ loop = start;
+
+ while (loop < end) {
+ unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK;
+ void *base = (void *)kaddr + (loop & ~PAGE_MASK);
+ next = min(next, end);
+
+ set_pte_at(&init_mm, kaddr, *pte,
+ pfn_pte(__phys_to_pfn(loop), prot));
+ flush_tlb_kernel_page(kaddr);
+
+ dmac_map_area(base, next - loop, dir);
+ loop = next;
+ }
+
+ if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
+ if (dir != DMA_FROM_DEVICE)
+ outer_clean_range(start, end);
+ else
+ outer_inv_range(start, end);
+ }
+
+out:
+ if (pte)
+ nvmap_free_pte(client->dev, pte);
+ nvmap_handle_put(h);
+ wmb();
+ return err;
+}
+
+static int rw_handle_page(struct nvmap_handle *h, int is_read,
+ unsigned long start, unsigned long rw_addr,
+ unsigned long bytes, unsigned long kaddr, pte_t *pte)
+{
+ pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
+ unsigned long end = start + bytes;
+ int err = 0;
+
+ while (!err && start < end) {
+ struct page *page = NULL;
+ unsigned long phys;
+ size_t count;
+ void *src;
+
+ if (!h->heap_pgalloc) {
+ phys = h->carveout->base + start;
+ } else {
+ page = h->pgalloc.pages[start >> PAGE_SHIFT];
+ BUG_ON(!page);
+ get_page(page);
+ phys = page_to_phys(page) + (start & ~PAGE_MASK);
+ }
+
+ set_pte_at(&init_mm, kaddr, pte,
+ pfn_pte(__phys_to_pfn(phys), prot));
+ flush_tlb_kernel_page(kaddr);
+
+ src = (void *)kaddr + (phys & ~PAGE_MASK);
+ phys = PAGE_SIZE - (phys & ~PAGE_MASK);
+ count = min_t(size_t, end - start, phys);
+
+ if (is_read)
+ err = copy_to_user((void *)rw_addr, src, count);
+ else
+ err = copy_from_user(src, (void *)rw_addr, count);
+
+ if (err)
+ err = -EFAULT;
+
+ rw_addr += count;
+ start += count;
+
+ if (page)
+ put_page(page);
+ }
+
+ return err;
+}
+
+static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
+ int is_read, unsigned long h_offs,
+ unsigned long sys_addr, unsigned long h_stride,
+ unsigned long sys_stride, unsigned long elem_size,
+ unsigned long count)
+{
+ ssize_t copied = 0;
+ pte_t **pte;
+ void *addr;
+ int ret = 0;
+
+ if (!elem_size)
+ return -EINVAL;
+
+ if (!h->alloc)
+ return -EFAULT;
+
+ if (elem_size == h_stride && elem_size == sys_stride) {
+ elem_size *= count;
+ h_stride = elem_size;
+ sys_stride = elem_size;
+ count = 1;
+ }
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ while (count--) {
+		if (h_offs + elem_size > h->size) {
+ nvmap_warn(client, "read/write outside of handle\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = rw_handle_page(h, is_read, h_offs, sys_addr,
+ elem_size, (unsigned long)addr, *pte);
+
+ if (ret)
+ break;
+
+ copied += elem_size;
+ sys_addr += sys_stride;
+ h_offs += h_stride;
+ }
+
+ nvmap_free_pte(client->dev, pte);
+ return ret ?: copied;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.h b/drivers/video/tegra/nvmap/nvmap_ioctl.h
new file mode 100644
index 000000000000..c802cd4dd7ae
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.h
@@ -0,0 +1,159 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_ioctl.h
+ *
+ * ioctl declarations for nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_IOCTL_H
+#define __VIDEO_TEGRA_NVMAP_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/file.h>
+
+#include <mach/nvmap.h>
+
+enum {
+ NVMAP_HANDLE_PARAM_SIZE = 1,
+ NVMAP_HANDLE_PARAM_ALIGNMENT,
+ NVMAP_HANDLE_PARAM_BASE,
+ NVMAP_HANDLE_PARAM_HEAP,
+};
+
+enum {
+ NVMAP_CACHE_OP_WB = 0,
+ NVMAP_CACHE_OP_INV,
+ NVMAP_CACHE_OP_WB_INV,
+};
+
+
+struct nvmap_create_handle {
+ union {
+ __u32 key; /* ClaimPreservedHandle */
+ __u32 id; /* FromId */
+ __u32 size; /* CreateHandle */
+ };
+ __u32 handle;
+};
+
+struct nvmap_alloc_handle {
+ __u32 handle;
+ __u32 heap_mask;
+ __u32 flags;
+ __u32 align;
+};
+
+struct nvmap_map_caller {
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem; should be page-aligned */
+ __u32 length; /* number of bytes to map */
+ __u32 flags;
+ unsigned long addr; /* user pointer */
+};
+
+struct nvmap_rw_handle {
+ unsigned long addr; /* user pointer */
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem */
+ __u32 elem_size; /* individual atom size */
+ __u32 hmem_stride; /* delta in bytes between atoms in hmem */
+ __u32 user_stride; /* delta in bytes between atoms in user */
+ __u32 count; /* number of atoms to copy */
+};
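+
+/* example (illustrative values): to read a 64x32-pixel sub-rectangle of
+ * 4-byte pixels from a surface whose rows are 2048 bytes apart inside the
+ * handle, a client could pass elem_size = 256, hmem_stride = 2048,
+ * user_stride = 256 and count = 32, with offset pointing at the first pixel
+ * of the rectangle; each of the 32 "atoms" is then one row of the
+ * rectangle. */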
+
+struct nvmap_pin_handle {
+ unsigned long handles; /* array of handles to pin/unpin */
+ unsigned long addr; /* array of addresses to return */
+ __u32 count; /* number of entries in handles */
+};
+
+struct nvmap_handle_param {
+ __u32 handle;
+ __u32 param;
+ unsigned long result;
+};
+
+struct nvmap_cache_op {
+ unsigned long addr;
+ __u32 handle;
+ __u32 len;
+ __s32 op;
+};
+
+#define NVMAP_IOC_MAGIC 'N'
+
+/* Creates a new memory handle. On input, the argument is the size of the new
+ * handle; on return, the argument is the name of the new handle
+ */
+#define NVMAP_IOC_CREATE _IOWR(NVMAP_IOC_MAGIC, 0, struct nvmap_create_handle)
+#define NVMAP_IOC_CLAIM _IOWR(NVMAP_IOC_MAGIC, 1, struct nvmap_create_handle)
+#define NVMAP_IOC_FROM_ID _IOWR(NVMAP_IOC_MAGIC, 2, struct nvmap_create_handle)
+
+/* Actually allocates memory for the specified handle */
+#define NVMAP_IOC_ALLOC _IOW(NVMAP_IOC_MAGIC, 3, struct nvmap_alloc_handle)
+
+/* Frees a memory handle, unpinning any pinned pages and unmapping any mappings
+ */
+#define NVMAP_IOC_FREE _IO(NVMAP_IOC_MAGIC, 4)
+
+/* Maps the region of the specified handle into a user-provided virtual address
+ * that was previously created via an mmap syscall on this fd */
+#define NVMAP_IOC_MMAP _IOWR(NVMAP_IOC_MAGIC, 5, struct nvmap_map_caller)
+
+/* Reads/writes data (possibly strided) from a user-provided buffer into the
+ * hmem at the specified offset */
+#define NVMAP_IOC_WRITE _IOW(NVMAP_IOC_MAGIC, 6, struct nvmap_rw_handle)
+#define NVMAP_IOC_READ _IOW(NVMAP_IOC_MAGIC, 7, struct nvmap_rw_handle)
+
+#define NVMAP_IOC_PARAM _IOWR(NVMAP_IOC_MAGIC, 8, struct nvmap_handle_param)
+
+/* Pins a list of memory handles into IO-addressable memory (either IOVMM
+ * space or physical memory, depending on the allocation), and returns the
+ * address. Handles may be pinned recursively. */
+#define NVMAP_IOC_PIN_MULT _IOWR(NVMAP_IOC_MAGIC, 10, struct nvmap_pin_handle)
+#define NVMAP_IOC_UNPIN_MULT _IOW(NVMAP_IOC_MAGIC, 11, struct nvmap_pin_handle)
+
+#define NVMAP_IOC_CACHE _IOW(NVMAP_IOC_MAGIC, 12, struct nvmap_cache_op)
+
+/* Returns a global ID usable to allow a remote process to create a handle
+ * reference to the same handle */
+#define NVMAP_IOC_GET_ID _IOWR(NVMAP_IOC_MAGIC, 13, struct nvmap_create_handle)
+
+#define NVMAP_IOC_MAXNR (_IOC_NR(NVMAP_IOC_GET_ID))
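+
+/*
+ * typical user-space sequence (sketch only: error handling is omitted and
+ * the device node path is platform policy, not defined by this header):
+ *
+ *	int fd = open("/dev/nvmap", O_RDWR);
+ *
+ *	struct nvmap_create_handle create = { .size = 1 << 20 };
+ *	ioctl(fd, NVMAP_IOC_CREATE, &create);
+ *
+ *	struct nvmap_alloc_handle alloc = {
+ *		.handle    = create.handle,
+ *		.heap_mask = NVMAP_HEAP_SYSMEM | NVMAP_HEAP_IOVMM,
+ *		.flags     = NVMAP_HANDLE_WRITE_COMBINE,
+ *		.align     = 4096,
+ *	};
+ *	ioctl(fd, NVMAP_IOC_ALLOC, &alloc);
+ *
+ *	struct nvmap_pin_handle pin = { .handles = create.handle, .count = 1 };
+ *	ioctl(fd, NVMAP_IOC_PIN_MULT, &pin);	- pin.addr now holds the address
+ *	ioctl(fd, NVMAP_IOC_UNPIN_MULT, &pin);
+ *
+ *	ioctl(fd, NVMAP_IOC_FREE, create.handle);
+ */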
+
+int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg);
+
+int nvmap_ioctl_get_param(struct file *filp, void __user* arg);
+
+int nvmap_ioctl_getid(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_alloc(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_free(struct file *filp, unsigned long arg);
+
+int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg);
+
+int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg);
+
+#endif
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.c b/drivers/video/tegra/nvmap/nvmap_mru.c
new file mode 100644
index 000000000000..252665427568
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_mru.c
@@ -0,0 +1,194 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_mru.c
+ *
+ * IOVMM virtualization support for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+
+#include <mach/iovmm.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* if IOVMM reclamation is enabled (CONFIG_NVMAP_RECLAIM_UNPINNED_VM),
+ * unpinned handles are placed onto a most-recently-used eviction list;
+ * multiple lists are maintained, segmented by size (sizes were chosen to
+ * roughly correspond with common sizes for graphics surfaces).
+ *
+ * if a handle is located on the MRU list, then the code below may
+ * steal its IOVMM area at any time to satisfy a pin operation if no
+ * free IOVMM space is available
+ */
+
+static const size_t mru_cutoff[] = {
+ 262144, 393216, 786432, 1048576, 1572864
+};
+
+static inline struct list_head *mru_list(struct nvmap_share *share, size_t size)
+{
+ unsigned int i;
+
+ BUG_ON(!share->mru_lists);
+ for (i = 0; i < ARRAY_SIZE(mru_cutoff); i++)
+ if (size <= mru_cutoff[i])
+ break;
+
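+	/* sizes above the largest cutoff fall through to the last list */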
+ return &share->mru_lists[i];
+}
+
+size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm)
+{
+ size_t vm_size = tegra_iovmm_get_vm_size(iovmm);
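+	/* 3/4 of the client's total IOVMM space */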
+ return (vm_size >> 2) * 3;
+}
+
+/* the caller must hold the MRU lock (nvmap_mru_lock) before calling this */
+void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h)
+{
+ size_t len = h->pgalloc.area->iovm_length;
+ list_add(&h->pgalloc.mru_list, mru_list(share, len));
+}
+
+void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h)
+{
+ nvmap_mru_lock(s);
+ if (!list_empty(&h->pgalloc.mru_list))
+ list_del(&h->pgalloc.mru_list);
+ nvmap_mru_unlock(s);
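+	/* re-initialize so later list_empty() checks see the handle as off-list */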
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+}
+
+/* returns a tegra_iovmm_area for a handle. if the handle already has
+ * an iovmm_area allocated, the handle is simply removed from its MRU list
+ * and the existing iovmm_area is returned.
+ *
+ * if the handle has no allocation, try to allocate a new IOVMM area.
+ *
+ * if a new area cannot be allocated, try to re-use the most-recently-unpinned
+ * handle's allocation.
+ *
+ * if that fails, iteratively evict handles from the MRU lists and free
+ * their allocations until the new allocation succeeds.
+ */
+struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+ struct nvmap_handle *h)
+{
+ struct list_head *mru;
+ struct nvmap_handle *evict = NULL;
+ struct tegra_iovmm_area *vm = NULL;
+ unsigned int i, idx;
+ pgprot_t prot;
+
+ BUG_ON(!h || !c || !c->share);
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+
+ if (h->pgalloc.area) {
+		/* this is only called with the pin lock held, and a handle
+		 * reference is taken before it is pinned, so there is no race
+		 * where h->pgalloc.area changes after the check above */
+ nvmap_mru_lock(c->share);
+ BUG_ON(list_empty(&h->pgalloc.mru_list));
+ list_del(&h->pgalloc.mru_list);
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ nvmap_mru_unlock(c->share);
+ return h->pgalloc.area;
+ }
+
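+	/* fast path: try to allocate a fresh area from free IOVMM space */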
+ vm = tegra_iovmm_create_vm(c->share->iovmm, NULL, h->size, prot);
+
+ if (vm) {
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ return vm;
+ }
+ /* attempt to re-use the most recently unpinned IOVMM area in the
+ * same size bin as the current handle. If that fails, iteratively
+ * evict handles (starting from the current bin) until an allocation
+ * succeeds or no more areas can be evicted */
+
+ nvmap_mru_lock(c->share);
+ mru = mru_list(c->share, h->size);
+ if (!list_empty(mru))
+ evict = list_first_entry(mru, struct nvmap_handle,
+ pgalloc.mru_list);
+
+ if (evict && evict->pgalloc.area->iovm_length >= h->size) {
+ list_del(&evict->pgalloc.mru_list);
+ vm = evict->pgalloc.area;
+ evict->pgalloc.area = NULL;
+ INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+ nvmap_mru_unlock(c->share);
+ return vm;
+ }
+
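+	/* begin evicting in the current size bin, wrapping through the rest */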
+ idx = mru - c->share->mru_lists;
+
+ for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
+ if (idx >= c->share->nr_mru)
+ idx = 0;
+ mru = &c->share->mru_lists[idx];
+ while (!list_empty(mru) && !vm) {
+ evict = list_first_entry(mru, struct nvmap_handle,
+ pgalloc.mru_list);
+
+ BUG_ON(atomic_read(&evict->pin) != 0);
+ BUG_ON(!evict->pgalloc.area);
+ list_del(&evict->pgalloc.mru_list);
+ INIT_LIST_HEAD(&evict->pgalloc.mru_list);
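+			/* drop the MRU lock around the IOVMM free/alloc calls,
+			 * then retake it before scanning for another victim */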
+ nvmap_mru_unlock(c->share);
+ tegra_iovmm_free_vm(evict->pgalloc.area);
+ evict->pgalloc.area = NULL;
+ vm = tegra_iovmm_create_vm(c->share->iovmm,
+ NULL, h->size, prot);
+ nvmap_mru_lock(c->share);
+ }
+ }
+ nvmap_mru_unlock(c->share);
+ return vm;
+}
+
+int nvmap_mru_init(struct nvmap_share *share)
+{
+ int i;
+ spin_lock_init(&share->mru_lock);
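+	/* one list per size cutoff, plus a catch-all for larger handles */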
+ share->nr_mru = ARRAY_SIZE(mru_cutoff) + 1;
+
+ share->mru_lists = kzalloc(sizeof(struct list_head) * share->nr_mru,
+ GFP_KERNEL);
+
+ if (!share->mru_lists)
+ return -ENOMEM;
+
+	for (i = 0; i < share->nr_mru; i++)
+ INIT_LIST_HEAD(&share->mru_lists[i]);
+
+ return 0;
+}
+
+void nvmap_mru_destroy(struct nvmap_share *share)
+{
+	kfree(share->mru_lists);
+	share->mru_lists = NULL;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.h b/drivers/video/tegra/nvmap/nvmap_mru.h
new file mode 100644
index 000000000000..bfc7fceae856
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_mru.h
@@ -0,0 +1,84 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_mru.h
+ *
+ * IOVMM virtualization support for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#ifndef __VIDEO_TEGRA_NVMAP_MRU_H
+#define __VIDEO_TEGRA_NVMAP_MRU_H
+
+#include <linux/spinlock.h>
+
+#include "nvmap.h"
+
+struct tegra_iovmm_area;
+struct tegra_iovmm_client;
+
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+
+static inline void nvmap_mru_lock(struct nvmap_share *share)
+{
+ spin_lock(&share->mru_lock);
+}
+
+static inline void nvmap_mru_unlock(struct nvmap_share *share)
+{
+ spin_unlock(&share->mru_lock);
+}
+
+int nvmap_mru_init(struct nvmap_share *share);
+
+void nvmap_mru_destroy(struct nvmap_share *share);
+
+size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm);
+
+void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h);
+
+void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h);
+
+struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+ struct nvmap_handle *h);
+
+#else
+
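+/* with reclaim disabled, nvmap_handle_iovmm() expects h->pgalloc.area to be
+ * populated already, and the remaining MRU hooks collapse to no-ops */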
+#define nvmap_mru_lock(_s) do { } while (0)
+#define nvmap_mru_unlock(_s) do { } while (0)
+#define nvmap_mru_init(_s) 0
+#define nvmap_mru_destroy(_s) do { } while (0)
+#define nvmap_mru_vm_size(_a) tegra_iovmm_get_vm_size(_a)
+
+static inline void nvmap_mru_insert_locked(struct nvmap_share *share,
+ struct nvmap_handle *h)
+{ }
+
+static inline void nvmap_mru_remove(struct nvmap_share *s,
+ struct nvmap_handle *h)
+{ }
+
+static inline struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+ struct nvmap_handle *h)
+{
+ BUG_ON(!h->pgalloc.area);
+ return h->pgalloc.area;
+}
+
+#endif
+
+#endif