author    Krishna Reddy <vdumpa@nvidia.com>    2011-12-19 14:35:48 -0800
committer Krishna Reddy <vdumpa@nvidia.com>    2011-12-29 17:10:18 -0800
commit    3b1a4102d33699c713d32aea8b1621ee53e16819 (patch)
tree      8f7fe976b96a9e906da60a63ab0441f28ee52373 /drivers/gpu
parent    e3763055fa85f6e815d1cd8c26cb4453bf0869a7 (diff)
gpu: ion: tegra: add tegra custom ioctl handling.
Add tegra specific custom ioctl handling.

Change-Id: If87b47f969fe0b3fdbb3bee965c370f6610fd4be
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/71113
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Hiroshi Doyu <hdoyu@nvidia.com>
Tested-by: Hiroshi Doyu <hdoyu@nvidia.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/ion/tegra/tegra_ion.c | 515
1 file changed, 509 insertions(+), 6 deletions(-)
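For context (not part of the patch itself), a minimal user-space sketch of how these custom commands are reached. It assumes the generic ION_IOC_CUSTOM ioctl and struct ion_custom_data from the ION UAPI of this era, and infers the tegra_ion_id_data fields from the handlers below; the real definitions live in <linux/tegra_ion.h>, which is not shown in this diff.

/* Hypothetical user-space sketch; field names follow the handlers in this
 * patch, the actual ABI is defined in <linux/tegra_ion.h>. */
#include <sys/ioctl.h>
#include <linux/ion.h>        /* ION_IOC_CUSTOM, struct ion_custom_data */
#include <linux/tegra_ion.h>  /* TEGRA_ION_GET_ID, struct tegra_ion_id_data */

static int tegra_ion_query_id(int ion_fd, struct ion_handle *handle,
                              unsigned long *id)
{
        struct tegra_ion_id_data id_data = { .handle = handle };
        struct ion_custom_data custom = {
                .cmd = TEGRA_ION_GET_ID,
                .arg = (unsigned long)&id_data,
        };

        /* The ION core forwards ION_IOC_CUSTOM to tegra_ion_ioctl(). */
        if (ioctl(ion_fd, ION_IOC_CUSTOM, &custom) < 0)
                return -1;
        *id = id_data.id;
        return 0;
}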
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
index 7af6e168ff4c..ab5f3f923f8b 100644
--- a/drivers/gpu/ion/tegra/tegra_ion.c
+++ b/drivers/gpu/ion/tegra/tegra_ion.c
@@ -2,6 +2,7 @@
* drivers/gpu/tegra/tegra_ion.c
*
* Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2011, NVIDIA Corporation.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -16,14 +17,282 @@
#include <linux/err.h>
#include <linux/ion.h>
+#include <linux/tegra_ion.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/io.h>
#include "../ion_priv.h"
-struct ion_device *idev;
-struct ion_mapper *tegra_user_mapper;
-int num_heaps;
-struct ion_heap **heaps;
+#if !defined(CONFIG_TEGRA_NVMAP)
+#include "mach/nvmap.h"
+struct nvmap_device *nvmap_dev;
+#endif
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+static int tegra_ion_pin(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct tegra_ion_pin_data data;
+ int ret = 0;
+ struct ion_handle *on_stack[16];
+ struct ion_handle **refs = on_stack;
+ int i;
+ bool valid_handle;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ if (data.count) {
+ size_t bytes = data.count * sizeof(struct ion_handle *);
+
+ if (data.count > ARRAY_SIZE(on_stack))
+ refs = kmalloc(data.count * sizeof(*refs), GFP_KERNEL);
+ else
+ refs = on_stack;
+ if (!refs)
+ return -ENOMEM;
+ if (copy_from_user(refs, (void *)data.handles, bytes)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ } else
+ return -EINVAL;
+
+ mutex_lock(&client->lock);
+ for (i = 0; i < data.count; i++) {
+ valid_handle = ion_handle_validate(client, refs[i]);
+ if (!valid_handle) {
+ pr_err("invalid handle passed to tegra_ion_pin, h=0x%x",
+ (u32)refs[i]);
+ mutex_unlock(&client->lock);
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+ mutex_unlock(&client->lock);
+
+ if (cmd == TEGRA_ION_PIN) {
+ ion_phys_addr_t addr;
+ size_t len;
+
+ for (i = 0; i < data.count; i++) {
+ ret = ion_phys(client, refs[i], &addr, &len);
+ if (ret)
+ goto err;
+ ion_handle_get(refs[i]);
+ ret = put_user(addr, &data.addr[i]);
+ if (ret)
+ goto err;
+ }
+ } else if (cmd == TEGRA_ION_UNPIN) {
+ /* FIXME: unpin it. */
+ for (i = 0; i < data.count; i++)
+ ion_handle_put(refs[i]);
+ }
+
+err:
+ if (ret) {
+ pr_err("\n*****%s: error, ret=0x%x", __func__, ret);
+ /* FIXME: undo pinning. */
+ }
+ if (refs != on_stack)
+ kfree(refs);
+ return ret;
+}
+
+static int tegra_ion_alloc_from_id(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct tegra_ion_id_data data;
+ struct ion_buffer *buffer;
+ struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg;
+
+ pr_debug("%s", __func__);
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ buffer = (struct ion_buffer *)data.id;
+ data.handle = ion_import(client, buffer);
+ data.size = buffer->size;
+ if (put_user(data.handle, &user_data->handle))
+ return -EFAULT;
+ if (put_user(data.size, &user_data->size))
+ return -EFAULT;
+ return 0;
+}
+
+static int tegra_ion_get_id(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ bool valid_handle;
+ struct tegra_ion_id_data data;
+ struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, data.handle);
+ mutex_unlock(&client->lock);
+
+ if (!valid_handle) {
+ pr_err("%s: invalid handle passed, h=0x%x\n",
+ __func__, (u32)data.handle);
+ WARN("%s: invalid handle passed\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: h=0x%x, b=0x%x, bref=%d", __func__,
+ (u32)data.handle, (u32)data.handle->buffer,
+ atomic_read(&data.handle->buffer->ref.refcount));
+ if (put_user((unsigned long)ion_handle_buffer(data.handle),
+ &user_data->id))
+ return -EFAULT;
+ return 0;
+}
+
+static int tegra_ion_cache_maint(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ wmb();
+ return 0;
+}
+
+static int tegra_ion_rw(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ bool valid_handle;
+ struct tegra_ion_rw_data data;
+ char *kern_addr, *src;
+ int ret = 0;
+ size_t copied = 0;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+
+ if (!data.handle || !data.addr || !data.count || !data.elem_size)
+ return -EINVAL;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, data.handle);
+ mutex_unlock(&client->lock);
+
+ if (!valid_handle) {
+ WARN("%s: invalid handle passed to get id.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (data.elem_size == data.mem_stride &&
+ data.elem_size == data.user_stride) {
+ data.elem_size *= data.count;
+ data.mem_stride = data.elem_size;
+ data.user_stride = data.elem_size;
+ data.count = 1;
+ }
+
+ kern_addr = ion_map_kernel(client, data.handle);
+
+ while (data.count--) {
+ if (data.offset + data.elem_size > data.handle->buffer->size) {
+ pr_err("%s: read/write outside of handle\n", __func__);
+ ret = -EFAULT;
+ break;
+ }
+
+ src = kern_addr + data.offset;
+ if (cmd == TEGRA_ION_READ)
+ ret = copy_to_user((void *)data.addr,
+ src, data.elem_size);
+ else
+ ret = copy_from_user(src,
+ (void *)data.addr, data.elem_size);
+
+ if (ret)
+ break;
+
+ copied += data.elem_size;
+ data.addr += data.user_stride;
+ data.offset += data.mem_stride;
+ }
+
+ ion_unmap_kernel(client, data.handle);
+ return ret;
+}
+
+static int tegra_ion_get_param(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ bool valid_handle;
+ struct tegra_ion_get_params_data data;
+ struct tegra_ion_get_params_data *user_data =
+ (struct tegra_ion_get_params_data *)arg;
+ struct ion_buffer *buffer;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, data.handle);
+ mutex_unlock(&client->lock);
+
+ if (!valid_handle) {
+ WARN("%s: invalid handle passed to get id.\n", __func__);
+ return -EINVAL;
+ }
+
+ buffer = ion_handle_buffer(data.handle);
+ data.align = 4096;
+ data.heap = 1;
+ ion_phys(client, data.handle, &data.addr, &data.size);
+
+ if (copy_to_user(user_data, &data, sizeof(data)))
+ return -EFAULT;
+
+ return 0;
+}
+
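+/*
+ * Dispatcher for the Tegra-specific commands. It is registered with the
+ * ION core through ion_device_create() in tegra_ion_probe() below and is
+ * reached from user space via the generic custom-ioctl path.
+ */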
+static long tegra_ion_ioctl(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -ENOTTY;
+
+ switch (cmd) {
+ case TEGRA_ION_ALLOC_FROM_ID:
+ ret = tegra_ion_alloc_from_id(client, cmd, arg);
+ break;
+ case TEGRA_ION_GET_ID:
+ ret = tegra_ion_get_id(client, cmd, arg);
+ break;
+ case TEGRA_ION_PIN:
+ case TEGRA_ION_UNPIN:
+ ret = tegra_ion_pin(client, cmd, arg);
+ break;
+ case TEGRA_ION_CACHE_MAINT:
+ ret = tegra_ion_cache_maint(client, cmd, arg);
+ break;
+ case TEGRA_ION_READ:
+ case TEGRA_ION_WRITE:
+ ret = tegra_ion_rw(client, cmd, arg);
+ break;
+ case TEGRA_ION_GET_PARAM:
+ ret = tegra_ion_get_param(client, cmd, arg);
+ break;
+ default:
+ pr_err("%s: Unknown custom ioctl\n", __func__);
+ return -ENOTTY;
+ }
+ return ret;
+}
int tegra_ion_probe(struct platform_device *pdev)
{
@@ -35,7 +304,7 @@ int tegra_ion_probe(struct platform_device *pdev)
heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
- idev = ion_device_create(NULL);
+ idev = ion_device_create(tegra_ion_ioctl);
if (IS_ERR_OR_NULL(idev)) {
kfree(heaps);
return PTR_ERR(idev);
@@ -53,6 +322,9 @@ int tegra_ion_probe(struct platform_device *pdev)
ion_device_add_heap(idev, heaps[i]);
}
platform_set_drvdata(pdev, idev);
+#if !defined(CONFIG_TEGRA_NVMAP)
+ nvmap_dev = (struct nvmap_device *)idev;
+#endif
return 0;
err:
for (i = 0; i < num_heaps; i++) {
@@ -91,6 +363,237 @@ static void __exit ion_exit(void)
platform_driver_unregister(&ion_driver);
}
-module_init(ion_init);
+fs_initcall(ion_init);
module_exit(ion_exit);
+#if !defined(CONFIG_TEGRA_NVMAP)
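+/*
+ * nvmap compatibility shim: when the legacy nvmap driver is not enabled,
+ * the nvmap_* entry points below are implemented directly on top of their
+ * ION counterparts so existing nvmap callers keep working.
+ */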
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+ const char *name)
+{
+ return ion_client_create(dev, 0xFFFFFFFF, name);
+}
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+ size_t align, unsigned int flags)
+{
+ return ion_alloc(client, size, align, 1);
+}
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ ion_free(client, r);
+}
+
+void *nvmap_mmap(struct nvmap_handle_ref *r)
+{
+ return ion_map_kernel(r->client, r);
+}
+
+void nvmap_munmap(struct nvmap_handle_ref *r, void *addr)
+{
+ ion_unmap_kernel(r->client, r);
+}
+
+struct nvmap_client *nvmap_client_get_file(int fd)
+{
+ return ion_client_get_file(fd);
+}
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
+{
+ ion_client_get(client);
+ return client;
+}
+
+void nvmap_client_put(struct nvmap_client *c)
+{
+ ion_client_put(c);
+}
+
+phys_addr_t nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r)
+{
+ ion_phys_addr_t addr;
+ size_t len;
+
+ ion_handle_get(r);
+ ion_phys(c, r, &addr, &len);
+ wmb();
+ return addr;
+}
+
+phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
+{
+ struct ion_buffer *buffer = (struct ion_buffer *)id;
+ struct ion_handle *handle;
+ ion_phys_addr_t addr;
+ size_t len;
+
+ handle = ion_import(c, buffer);
+ ion_phys(c, handle, &addr, &len);
+ ion_handle_put(handle);
+ return addr;
+}
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ /* FIXME: this should be implemented for iommu heap. */
+ ion_handle_put(r);
+}
+
+static int nvmap_reloc_pin_array(struct ion_client *client,
+ const struct nvmap_pinarray_elem *arr,
+ int nr, struct ion_handle *gather)
+{
+ struct ion_handle *last_patch = NULL;
+ void *patch_addr;
+ ion_phys_addr_t pin_addr;
+ size_t len;
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct ion_handle *patch;
+ struct ion_handle *pin;
+ ion_phys_addr_t reloc_addr;
+
+ /* all of the handles are validated and get'ted prior to
+ * calling this function, so casting is safe here */
+ pin = (struct ion_handle *)arr[i].pin_mem;
+
+ if (arr[i].patch_mem == (unsigned long)last_patch) {
+ patch = last_patch;
+ } else if (arr[i].patch_mem == (unsigned long)gather) {
+ patch = gather;
+ } else {
+ if (last_patch)
+ ion_handle_put(last_patch);
+
+ ion_handle_get((struct ion_handle *)arr[i].patch_mem);
+ patch = (struct ion_handle *)arr[i].patch_mem;
+ if (!patch)
+ return -EPERM;
+ last_patch = patch;
+ }
+
+ patch_addr = ion_map_kernel(client, patch);
+ patch_addr = patch_addr + arr[i].patch_offset;
+
+ ion_phys(client, pin, &pin_addr, &len);
+ reloc_addr = pin_addr + arr[i].pin_offset;
+ __raw_writel(reloc_addr, patch_addr);
+ ion_unmap_kernel(client, patch);
+ }
+
+ if (last_patch)
+ ion_handle_put(last_patch);
+
+ wmb();
+ return 0;
+}
+
+int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
+ const struct nvmap_pinarray_elem *arr, int nr,
+ struct nvmap_handle **unique)
+{
+ int i;
+ int count = 0;
+
+ /* FIXME: take care of duplicate ones & validation. */
+ for (i = 0; i < nr; i++) {
+ unique[i] = (struct nvmap_handle *)arr[i].pin_mem;
+ nvmap_pin(client, (struct nvmap_handle_ref *)unique[i]);
+ count++;
+ }
+ nvmap_reloc_pin_array((struct ion_client *)client,
+ arr, nr, (struct ion_handle *)gather);
+ return nr;
+}
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+ struct nvmap_handle **h, int nr)
+{
+ int i;
+
+ for (i = 0; i < nr; i++)
+ nvmap_unpin(client, h[i]);
+}
+
+int nvmap_patch_word(struct nvmap_client *client,
+ struct nvmap_handle *patch,
+ u32 patch_offset, u32 patch_value)
+{
+ void *vaddr;
+ u32 *patch_addr;
+
+ vaddr = ion_map_kernel(client, patch);
+ patch_addr = vaddr + patch_offset;
+ __raw_writel(patch_value, patch_addr);
+ wmb();
+ ion_unmap_kernel(client, patch);
+ return 0;
+}
+
+struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct ion_handle *handle;
+
+ handle = (struct ion_handle *)*((unsigned int*)id);
+ pr_debug("%s: id=0x%x, h=0x%x,c=0x%x",
+ __func__, (u32)id, (u32)handle, (u32)client);
+ nvmap_handle_get(handle);
+ return handle;
+}
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct ion_buffer *buffer;
+ struct ion_handle *handle;
+ struct ion_client *ion_client = client;
+
+ handle = (struct ion_handle *)*((unsigned int*)id);
+ pr_debug("%s: id=0x%x, h=0x%x,c=0x%x",
+ __func__, (u32)id, (u32)handle, (u32)client);
+ buffer = handle->buffer;
+
+ handle = ion_handle_create(client, buffer);
+
+ mutex_lock(&ion_client->lock);
+ ion_handle_add(ion_client, handle);
+ mutex_unlock(&ion_client->lock);
+
+ pr_debug("%s: dup id=0x%x, h=0x%x", __func__,
+ (u32)id, (u32)handle);
+ return handle;
+}
+
+void _nvmap_handle_free(struct nvmap_handle *h)
+{
+ ion_handle_put(h);
+}
+
+struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
+ size_t size, size_t align, unsigned int flags, unsigned int iova_start)
+{
+ /* FIXME: */
+ return NULL;
+}
+
+void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ /* FIXME: */
+}
+
+struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
+{
+ ion_handle_get(h);
+ return h;
+}
+
+void nvmap_handle_put(struct nvmap_handle *h)
+{
+ ion_handle_put(h);
+}
+
+#endif