Diffstat (limited to 'drivers/gpu')
 drivers/gpu/Makefile                |    2 +-
 drivers/gpu/ion/Kconfig             |   17 +
 drivers/gpu/ion/Makefile            |    3 +
 drivers/gpu/ion/ion.c               | 1126 +
 drivers/gpu/ion/ion_carveout_heap.c |  162 +
 drivers/gpu/ion/ion_heap.c          |   78 +
 drivers/gpu/ion/ion_iommu_heap.c    |  350 +
 drivers/gpu/ion/ion_priv.h          |  289 +
 drivers/gpu/ion/ion_system_heap.c   |  198 +
 drivers/gpu/ion/ion_system_mapper.c |  114 +
 drivers/gpu/ion/tegra/Makefile      |    1 +
 drivers/gpu/ion/tegra/tegra_ion.c   |  595 +
 12 files changed, 2934 insertions(+), 1 deletion(-)
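
For orientation before the patch body: the interface added here is a misc device at /dev/ion, driven by the ION_IOC_* ioctls implemented in ion.c below. What follows is a minimal userspace sketch, assuming the ioctl commands and struct layouts come from the include/linux/ion.h header, which this diff does not itself add; the field names follow the ioctl handler in ion.c.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>	/* assumed to provide ION_IOC_* and the data structs */

static int ion_smoke_test(void)
{
	struct ion_allocation_data alloc = {
		.len   = 4096,
		.align = 4096,
		.flags = 1,	/* bitmask of acceptable heap ids (platform specific) */
	};
	struct ion_fd_data share;
	void *p;
	int fd = open("/dev/ion", O_RDONLY);	/* ion_open() creates a client */

	if (fd < 0)
		return -1;
	if (ioctl(fd, ION_IOC_ALLOC, &alloc) < 0)	/* walks heaps in priority order */
		goto fail;
	share.handle = alloc.handle;
	if (ioctl(fd, ION_IOC_SHARE, &share) < 0)	/* wraps the buffer in a new fd */
		goto fail;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (p == MAP_FAILED)
		goto fail;
	/* use the buffer here, or pass share.fd to another process */
	munmap(p, 4096);
	close(share.fd);
	close(fd);	/* releasing the client frees any remaining handles */
	return 0;
fail:
	close(fd);
	return -1;
}
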
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd0..ca2d3b34dbf5 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/ vga/ stub/
+obj-y += drm/ vga/ stub/ ion/
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
new file mode 100644
index 000000000000..9a8cbdd9836d
--- /dev/null
+++ b/drivers/gpu/ion/Kconfig
@@ -0,0 +1,17 @@
+menuconfig ION
+	tristate "Ion Memory Manager"
+	select GENERIC_ALLOCATOR
+	help
+	  Choose this option to enable the ION Memory Manager.
+
+config ION_IOMMU
+	bool
+
+config ION_TEGRA
+	tristate "Ion for Tegra"
+	depends on ARCH_TEGRA && ION
+	select TEGRA_IOMMU_SMMU if !ARCH_TEGRA_2x_SOC
+	select ION_IOMMU if TEGRA_IOMMU_SMMU
+	help
+	  Choose this option if you wish to use ion on an NVIDIA Tegra.
+
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
new file mode 100644
index 000000000000..4ddc78e9d41d
--- /dev/null
+++ b/drivers/gpu/ion/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o
+obj-$(CONFIG_ION_IOMMU) += ion_iommu_heap.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
new file mode 100644
index 000000000000..28b8bb44212b
--- /dev/null
+++ b/drivers/gpu/ion/ion.c
@@ -0,0 +1,1126 @@
+/*
+ * drivers/gpu/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/ion.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "ion_priv.h"
+#define DEBUG
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+			   struct ion_buffer *buffer)
+{
+	struct rb_node **p = &dev->buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_buffer *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_buffer, node);
+
+		if (buffer < entry) {
+			p = &(*p)->rb_left;
+		} else if (buffer > entry) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("buffer already found.");
+			BUG();
+		}
+	}
+
+	rb_link_node(&buffer->node, parent, p);
+	rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+					    struct ion_device *dev,
+					    unsigned long len,
+					    unsigned long align,
+					    unsigned long flags)
+{
+	struct ion_buffer *buffer;
+	int ret;
+
+	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+
+	buffer->heap = heap;
+	kref_init(&buffer->ref);
+
+	ret = heap->ops->allocate(heap, buffer, len, align, flags);
+	if (ret) {
+		kfree(buffer);
+		return ERR_PTR(ret);
+	}
+	buffer->dev = dev;
+	buffer->size = len;
+	mutex_init(&buffer->lock);
+	ion_buffer_add(dev, buffer);
+	return buffer;
+}
+
+static void ion_buffer_destroy(struct kref *kref)
+{
+	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+	struct ion_device *dev = buffer->dev;
+
+	buffer->heap->ops->free(buffer);
+	mutex_lock(&dev->lock);
+	rb_erase(&buffer->node, &dev->buffers);
+	mutex_unlock(&dev->lock);
+	kfree(buffer);
+}
+
+void ion_buffer_get(struct ion_buffer *buffer)
+{
+	kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+	return kref_put(&buffer->ref, ion_buffer_destroy);
+}
+
+struct ion_handle *ion_handle_create(struct ion_client *client,
+				     struct ion_buffer *buffer)
+{
+	struct ion_handle *handle;
+
+	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+	if (!handle)
+		return ERR_PTR(-ENOMEM);
+	kref_init(&handle->ref);
+	rb_init_node(&handle->node);
+	handle->client = client;
+	ion_buffer_get(buffer);
+	handle->buffer = buffer;
+
+	return handle;
+}
+
+static void ion_handle_destroy(struct kref *kref)
+{
+	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+	/* XXX Can a handle be destroyed while its map count is non-zero?:
+	   if (handle->map_cnt) unmap
+	 */
+	ion_buffer_put(handle->buffer);
+	mutex_lock(&handle->client->lock);
+	if (!RB_EMPTY_NODE(&handle->node))
+		rb_erase(&handle->node, &handle->client->handles);
+	mutex_unlock(&handle->client->lock);
+	kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+	return handle->buffer;
+}
+
+void ion_handle_get(struct ion_handle *handle)
+{
+	kref_get(&handle->ref);
+}
+
+int ion_handle_put(struct ion_handle *handle)
+{
+	return kref_put(&handle->ref, ion_handle_destroy);
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+					    struct ion_buffer *buffer)
+{
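/*
 * (Annotation, not part of the original patch.) The helpers above define
 * the ownership model: every ion_buffer is kref-counted, and every
 * ion_handle is itself kref-counted while holding one reference on its
 * buffer, so a buffer is freed only when the last handle in any client
 * lets go. ion_alloc() below depends on this: ion_buffer_create() returns
 * the buffer with a refcount of 1, ion_handle_create() takes a second
 * reference, and the allocation path drops one, leaving the handle as
 * the sole owner.
 */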
+	struct rb_node *n;
+
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		if (handle->buffer == buffer)
+			return handle;
+	}
+	return NULL;
+}
+
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+{
+	struct rb_node *n = client->handles.rb_node;
+
+	while (n) {
+		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
+							  node);
+		if (handle < handle_node)
+			n = n->rb_left;
+		else if (handle > handle_node)
+			n = n->rb_right;
+		else
+			return true;
+	}
+	WARN(1, "invalid handle passed h=%p, comm=%s\n", handle,
+	     current->group_leader->comm);
+	return false;
+}
+
+void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+	struct rb_node **p = &client->handles.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_handle *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_handle, node);
+
+		if (handle < entry)
+			p = &(*p)->rb_left;
+		else if (handle > entry)
+			p = &(*p)->rb_right;
+		else
+			WARN(1, "%s: handle already found.", __func__);
+	}
+
+	rb_link_node(&handle->node, parent, p);
+	rb_insert_color(&handle->node, &client->handles);
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int flags)
+{
+	struct rb_node *n;
+	struct ion_handle *handle;
+	struct ion_device *dev = client->dev;
+	struct ion_buffer *buffer = NULL;
+
+	/*
+	 * traverse the list of heaps available in this system in priority
+	 * order.  If the heap type is supported by the client and matches
+	 * the request of the caller, allocate from it.  Repeat until
+	 * allocate has succeeded or all heaps have been tried
+	 */
+	mutex_lock(&dev->lock);
+	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
+		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+		/* if the client doesn't support this heap type */
+		if (!((1 << heap->type) & client->heap_mask))
+			continue;
+		/* if the caller didn't specify this heap id */
+		if (!((1 << heap->id) & flags))
+			continue;
+		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		if (!IS_ERR_OR_NULL(buffer))
+			break;
+	}
+	mutex_unlock(&dev->lock);
+
+	if (IS_ERR_OR_NULL(buffer))
+		return ERR_PTR(PTR_ERR(buffer));
+
+	handle = ion_handle_create(client, buffer);
+
+	if (IS_ERR_OR_NULL(handle))
+		goto end;
+
+	/*
+	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
+	 * and ion_handle_create will take a second reference, drop one here
+	 */
+	ion_buffer_put(buffer);
+
+	mutex_lock(&client->lock);
+	ion_handle_add(client, handle);
+	mutex_unlock(&client->lock);
+	return handle;
+
+end:
+	ion_buffer_put(buffer);
+	return handle;
+}
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+	bool valid_handle;
+
+	BUG_ON(client != handle->client);
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	mutex_unlock(&client->lock);
+
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to free.\n", __func__);
+		return;
+	}
+	ion_handle_put(handle);
+}
+
+static bool _ion_map(int *buffer_cnt, int *handle_cnt)
+{
+	bool map;
+
+	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
+
+	if (*buffer_cnt)
+		map = false;
+	else
+		map = true;
+	if (*handle_cnt == 0)
+		(*buffer_cnt)++;
+	(*handle_cnt)++;
+	return map;
+}
+
+static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
+{
+	BUG_ON(*handle_cnt == 0);
+	(*handle_cnt)--;
+	if (*handle_cnt != 0)
+		return false;
+	BUG_ON(*buffer_cnt == 0);
+	(*buffer_cnt)--;
+	if (*buffer_cnt == 0)
+		return true;
+	return false;
+}
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_buffer *buffer;
+	int ret;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+
+	if (!buffer->heap->ops->phys) {
+		pr_err("ion_phys is not implemented by this heap.\n");
+		mutex_unlock(&client->lock);
+		return -ENODEV;
+	}
+	mutex_unlock(&client->lock);
+	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+	return ret;
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	void *vaddr;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		WARN(1, "invalid handle passed to map_kernel.\n");
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+
+	if (!handle->buffer->heap->ops->map_kernel) {
+		pr_err("map_kernel is not implemented by this heap.\n");
+		mutex_unlock(&buffer->lock);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
+		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+		if (IS_ERR_OR_NULL(vaddr))
+			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
+		buffer->vaddr = vaddr;
+	} else {
+		vaddr = buffer->vaddr;
+	}
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return vaddr;
+}
+
+struct scatterlist *ion_map_dma(struct ion_client *client,
+				struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	struct scatterlist *sglist;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		WARN(1, "invalid handle passed to map_dma.\n");
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+
+	if (!handle->buffer->heap->ops->map_dma) {
+		pr_err("map_dma is not implemented by this heap.\n");
+		mutex_unlock(&buffer->lock);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-ENODEV);
+	}
+	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
+		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
+		if (IS_ERR_OR_NULL(sglist))
+			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
+		buffer->sglist = sglist;
+	} else {
+		sglist = buffer->sglist;
+	}
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return sglist;
+}
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+		buffer->vaddr = NULL;
+	}
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+}
+
+void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
+		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+		buffer->sglist = NULL;
+	}
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+}
+
+
+struct ion_buffer *ion_share(struct ion_client *client,
+			     struct ion_handle *handle)
+{
+	bool valid_handle;
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	mutex_unlock(&client->lock);
+	if (!valid_handle) {
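/*
 * (Annotation, not part of the original patch.) ion_share() deliberately
 * returns the raw buffer without taking a reference; the comment below
 * places that burden on the caller. The fd-based path (ION_IOC_SHARE ->
 * ion_ioctl_share() further down) instead pins the buffer with the anon
 * file's own reference.
 */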
+ WARN(1, "%s: invalid handle passed to share.\n", __func__); + return ERR_PTR(-EINVAL); + } + + /* do not take an extra reference here, the burden is on the caller + * to make sure the buffer doesn't go away while it's passing it + * to another client -- ion_free should not be called on this handle + * until the buffer has been imported into the other client + */ + return handle->buffer; +} + +struct ion_handle *ion_import(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct ion_handle *handle = NULL; + + mutex_lock(&client->lock); + /* if a handle exists for this buffer just take a reference to it */ + handle = ion_handle_lookup(client, buffer); + if (!IS_ERR_OR_NULL(handle)) { + ion_handle_get(handle); + goto end; + } + handle = ion_handle_create(client, buffer); + if (IS_ERR_OR_NULL(handle)) { + pr_err("error during handle create\n"); + goto end; + } + ion_handle_add(client, handle); +end: + mutex_unlock(&client->lock); + return handle; +} + +static const struct file_operations ion_share_fops; + +struct ion_handle *ion_import_fd(struct ion_client *client, int fd) +{ + struct file *file = fget(fd); + struct ion_handle *handle; + + if (!file) { + pr_err("imported fd not found in file table.\n"); + return ERR_PTR(-EINVAL); + } + if (file->f_op != &ion_share_fops) { + pr_err("imported file is not a shared ion file.\n"); + handle = ERR_PTR(-EINVAL); + goto end; + } + handle = ion_import(client, file->private_data); +end: + fput(file); + return handle; +} + +static int ion_debug_client_show(struct seq_file *s, void *unused) +{ + struct ion_client *client = s->private; + struct rb_node *n; + size_t sizes[ION_NUM_HEAPS] = {0}; + const char *names[ION_NUM_HEAPS] = {0}; + int i; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + enum ion_heap_type type = handle->buffer->heap->type; + + if (!names[type]) + names[type] = handle->buffer->heap->name; + sizes[type] += handle->buffer->size; + } + mutex_unlock(&client->lock); + + seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); + for (i = 0; i < ION_NUM_HEAPS; i++) { + if (!names[i]) + continue; + seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i], + atomic_read(&client->ref.refcount)); + } + return 0; +} + +static int ion_debug_client_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_client_show, inode->i_private); +} + +static const struct file_operations debug_client_fops = { + .open = ion_debug_client_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct ion_client *ion_client_lookup(struct ion_device *dev, + struct task_struct *task) +{ + struct rb_node *n = dev->user_clients.rb_node; + struct ion_client *client; + + mutex_lock(&dev->lock); + while (n) { + client = rb_entry(n, struct ion_client, node); + if (task == client->task) { + ion_client_get(client); + mutex_unlock(&dev->lock); + return client; + } else if (task < client->task) { + n = n->rb_left; + } else if (task > client->task) { + n = n->rb_right; + } + } + mutex_unlock(&dev->lock); + return NULL; +} + +struct ion_client *ion_client_create(struct ion_device *dev, + unsigned int heap_mask, + const char *name) +{ + struct ion_client *client; + struct task_struct *task; + struct rb_node **p; + struct rb_node *parent = NULL; + struct ion_client *entry; + char debug_name[64]; + pid_t pid; + + get_task_struct(current->group_leader); + task_lock(current->group_leader); 
+ pid = task_pid_nr(current->group_leader); + /* don't bother to store task struct for kernel threads, + they can't be killed anyway */ + if (current->group_leader->flags & PF_KTHREAD) { + put_task_struct(current->group_leader); + task = NULL; + } else { + task = current->group_leader; + } + task_unlock(current->group_leader); + + /* if this isn't a kernel thread, see if a client already + exists */ + if (task) { + client = ion_client_lookup(dev, task); + if (!IS_ERR_OR_NULL(client)) { + put_task_struct(current->group_leader); + return client; + } + } + + client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); + if (!client) { + put_task_struct(current->group_leader); + return ERR_PTR(-ENOMEM); + } + + client->dev = dev; + client->handles = RB_ROOT; + mutex_init(&client->lock); + client->name = name; + client->heap_mask = heap_mask; + client->task = task; + client->pid = pid; + kref_init(&client->ref); + + mutex_lock(&dev->lock); + if (task) { + p = &dev->user_clients.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_client, node); + + if (task < entry->task) + p = &(*p)->rb_left; + else if (task > entry->task) + p = &(*p)->rb_right; + } + rb_link_node(&client->node, parent, p); + rb_insert_color(&client->node, &dev->user_clients); + } else { + p = &dev->kernel_clients.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_client, node); + + if (client < entry) + p = &(*p)->rb_left; + else if (client > entry) + p = &(*p)->rb_right; + } + rb_link_node(&client->node, parent, p); + rb_insert_color(&client->node, &dev->kernel_clients); + } + + snprintf(debug_name, 64, "%u", client->pid); + client->debug_root = debugfs_create_file(debug_name, 0664, + dev->debug_root, client, + &debug_client_fops); + mutex_unlock(&dev->lock); + + return client; +} + +static void _ion_client_destroy(struct kref *kref) +{ + struct ion_client *client = container_of(kref, struct ion_client, ref); + struct ion_device *dev = client->dev; + struct rb_node *n; + + pr_debug("\n"); + while ((n = rb_first(&client->handles))) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + ion_handle_destroy(&handle->ref); + } + mutex_lock(&dev->lock); + if (client->task) { + rb_erase(&client->node, &dev->user_clients); + put_task_struct(client->task); + } else { + rb_erase(&client->node, &dev->kernel_clients); + } + debugfs_remove_recursive(client->debug_root); + mutex_unlock(&dev->lock); + + kfree(client); +} + +void ion_client_get(struct ion_client *client) +{ + kref_get(&client->ref); +} + +int ion_client_put(struct ion_client *client) +{ + return kref_put(&client->ref, _ion_client_destroy); +} + +void ion_client_destroy(struct ion_client *client) +{ + ion_client_put(client); +} + +static int ion_share_release(struct inode *inode, struct file* file) +{ + struct ion_buffer *buffer = file->private_data; + + pr_debug("\n"); + /* drop the reference to the buffer -- this prevents the + buffer from going away because the client holding it exited + while it was being passed */ + ion_buffer_put(buffer); + return 0; +} + +static void ion_vma_open(struct vm_area_struct *vma) +{ + + struct ion_buffer *buffer = vma->vm_file->private_data; + struct ion_handle *handle = vma->vm_private_data; + struct ion_client *client; + + pr_debug("\n"); + /* check that the client still exists and take a reference so + it can't go away until this vma is closed */ + client = ion_client_lookup(buffer->dev, current->group_leader); + if (IS_ERR_OR_NULL(client)) { + vma->vm_private_data = NULL; 
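/* annotation: the owning client is gone, so vm_private_data is left NULL
 * here; ion_vma_close() uses that to tell there is nothing to put */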
+ return; + } + ion_buffer_get(buffer); + ion_handle_get(handle); + pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); +} + +static void ion_vma_close(struct vm_area_struct *vma) +{ + struct ion_handle *handle = vma->vm_private_data; + struct ion_buffer *buffer = vma->vm_file->private_data; + struct ion_client *client; + + pr_debug("\n"); + /* this indicates the client is gone, nothing to do here */ + if (!handle) + return; + client = handle->client; + pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); + ion_handle_put(handle); + ion_client_put(client); + ion_buffer_put(buffer); + pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); +} + +static struct vm_operations_struct ion_vm_ops = { + .open = ion_vma_open, + .close = ion_vma_close, +}; + +static int ion_share_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = file->private_data; + unsigned long size = vma->vm_end - vma->vm_start; + struct ion_client *client; + struct ion_handle *handle; + int ret; + + pr_debug("\n"); + /* make sure the client still exists, it's possible for the client to + have gone away but the map/share fd still to be around, take + a reference to it so it can't go away while this mapping exists */ + client = ion_client_lookup(buffer->dev, current->group_leader); + if (IS_ERR_OR_NULL(client)) { + WARN(1, "trying to mmap an ion handle in a process with no " + "ion client\n"); + return -EINVAL; + } + + if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) > + buffer->size)) { + WARN(1, "trying to map larger area than handle has available" + "\n"); + ret = -EINVAL; + goto err; + } + + /* find the handle and take a reference to it */ + handle = ion_import(client, buffer); + if (IS_ERR_OR_NULL(handle)) { + ret = -EINVAL; + goto err; + } + ion_buffer_get(buffer); + + if (!handle->buffer->heap->ops->map_user) { + pr_err("this heap does not define a method for mapping " + "to userspace\n"); + ret = -EINVAL; + goto err1; + } + + mutex_lock(&buffer->lock); + /* now map it to userspace */ + ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); + mutex_unlock(&buffer->lock); + if (ret) { + pr_err("failure mapping buffer to userspace\n"); + goto err1; + } + + vma->vm_ops = &ion_vm_ops; + /* move the handle into the vm_private_data so we can access it from + vma_open/close */ + vma->vm_private_data = handle; + pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); + return 0; + +err1: + /* drop the reference to the handle */ + ion_handle_put(handle); +err: + /* drop the reference to the client */ + ion_client_put(client); + return ret; +} + +static const struct file_operations ion_share_fops = { + .owner = THIS_MODULE, + .release = ion_share_release, + .mmap = ion_share_mmap, +}; + +static int ion_ioctl_share(struct file *parent, struct ion_client *client, + struct ion_handle *handle) +{ + int fd = get_unused_fd(); + struct file *file; + + if (fd < 0) + return -ENFILE; + + file = anon_inode_getfile("ion_share_fd", &ion_share_fops, + handle->buffer, O_RDWR); + if (IS_ERR_OR_NULL(file)) + goto err; + 
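/*
 * (Annotation, not part of the original patch.) The anon file takes its
 * own reference on the buffer just below, so the exporter may free its
 * handle once the fd is installed. A second process recovers a handle
 * from the fd with ION_IOC_IMPORT, e.g.:
 *
 *	struct ion_fd_data data = { .fd = fd_from_sender };
 *	ioctl(ion_fd, ION_IOC_IMPORT, &data);
 *
 * after which data.handle names the same underlying ion_buffer.
 */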
ion_buffer_get(handle->buffer); + fd_install(fd, file); + + return fd; + +err: + put_unused_fd(fd); + return -ENFILE; +} + +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct ion_client *client = filp->private_data; + + switch (cmd) { + case ION_IOC_ALLOC: + { + struct ion_allocation_data data; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + data.handle = ion_alloc(client, data.len, data.align, + data.flags); + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + break; + } + case ION_IOC_FREE: + { + struct ion_handle_data data; + bool valid; + + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_handle_data))) + return -EFAULT; + mutex_lock(&client->lock); + valid = ion_handle_validate(client, data.handle); + mutex_unlock(&client->lock); + if (!valid) + return -EINVAL; + ion_free(client, data.handle); + break; + } + case ION_IOC_MAP: + case ION_IOC_SHARE: + { + struct ion_fd_data data; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + mutex_lock(&client->lock); + if (!ion_handle_validate(client, data.handle)) { + WARN(1, "invalid handle passed to share ioctl.\n"); + mutex_unlock(&client->lock); + return -EINVAL; + } + data.fd = ion_ioctl_share(filp, client, data.handle); + mutex_unlock(&client->lock); + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + break; + } + case ION_IOC_IMPORT: + { + struct ion_fd_data data; + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_fd_data))) + return -EFAULT; + + data.handle = ion_import_fd(client, data.fd); + if (IS_ERR(data.handle)) + data.handle = NULL; + if (copy_to_user((void __user *)arg, &data, + sizeof(struct ion_fd_data))) + return -EFAULT; + break; + } + case ION_IOC_CUSTOM: + { + struct ion_device *dev = client->dev; + struct ion_custom_data data; + + if (!dev->custom_ioctl) + return -ENOTTY; + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_custom_data))) + return -EFAULT; + return dev->custom_ioctl(client, data.cmd, data.arg); + } + default: + return -ENOTTY; + } + return 0; +} + +static int ion_release(struct inode *inode, struct file *file) +{ + struct ion_client *client = file->private_data; + + pr_debug("\n"); + ion_client_put(client); + return 0; +} + +static int ion_open(struct inode *inode, struct file *file) +{ + struct miscdevice *miscdev = file->private_data; + struct ion_device *dev = container_of(miscdev, struct ion_device, dev); + struct ion_client *client; + + pr_debug("\n"); + client = ion_client_create(dev, -1, "user"); + if (IS_ERR_OR_NULL(client)) + return PTR_ERR(client); + file->private_data = client; + + return 0; +} + +static const struct file_operations ion_fops = { + .owner = THIS_MODULE, + .open = ion_open, + .release = ion_release, + .unlocked_ioctl = ion_ioctl, +}; + +static size_t ion_debug_heap_total(struct ion_client *client, + enum ion_heap_type type) +{ + size_t size = 0; + struct rb_node *n; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, + struct ion_handle, + node); + if (handle->buffer->heap->type == type) + size += handle->buffer->size; + } + mutex_unlock(&client->lock); + return size; +} + +static int ion_debug_heap_show(struct seq_file *s, void *unused) +{ + struct ion_heap *heap = s->private; + struct ion_device *dev = heap->dev; + struct rb_node *n; + + seq_printf(s, "%16.s %16.s %16.s\n", "client", 
"pid", "size"); + for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + char task_comm[TASK_COMM_LEN]; + size_t size = ion_debug_heap_total(client, heap->type); + if (!size) + continue; + + get_task_comm(task_comm, client->task); + seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid, + size); + } + + for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + size_t size = ion_debug_heap_total(client, heap->type); + if (!size) + continue; + seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid, + size); + } + return 0; +} + +static int ion_debug_heap_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_heap_show, inode->i_private); +} + +static const struct file_operations debug_heap_fops = { + .open = ion_debug_heap_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) +{ + struct rb_node **p = &dev->heaps.rb_node; + struct rb_node *parent = NULL; + struct ion_heap *entry; + + heap->dev = dev; + mutex_lock(&dev->lock); + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_heap, node); + + if (heap->id < entry->id) { + p = &(*p)->rb_left; + } else if (heap->id > entry->id ) { + p = &(*p)->rb_right; + } else { + pr_err("can not insert multiple heaps with " + "id %d\n", heap->id); + goto end; + } + } + + rb_link_node(&heap->node, parent, p); + rb_insert_color(&heap->node, &dev->heaps); + debugfs_create_file(heap->name, 0664, dev->debug_root, heap, + &debug_heap_fops); +end: + mutex_unlock(&dev->lock); +} + +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)) +{ + struct ion_device *idev; + int ret; + + idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); + if (!idev) + return ERR_PTR(-ENOMEM); + + idev->dev.minor = MISC_DYNAMIC_MINOR; + idev->dev.name = "ion"; + idev->dev.fops = &ion_fops; + idev->dev.parent = NULL; + ret = misc_register(&idev->dev); + if (ret) { + pr_err("ion: failed to register misc device.\n"); + return ERR_PTR(ret); + } + + idev->debug_root = debugfs_create_dir("ion", NULL); + if (IS_ERR_OR_NULL(idev->debug_root)) + pr_err("ion: failed to create debug files.\n"); + + idev->custom_ioctl = custom_ioctl; + idev->buffers = RB_ROOT; + mutex_init(&idev->lock); + idev->heaps = RB_ROOT; + idev->user_clients = RB_ROOT; + idev->kernel_clients = RB_ROOT; + return idev; +} + +void ion_device_destroy(struct ion_device *dev) +{ + misc_deregister(&dev->dev); + /* XXX need to free the heaps and clients ? */ + kfree(dev); +} + +struct ion_client *ion_client_get_file(int fd) +{ + struct ion_client *client = ERR_PTR(-EFAULT); + struct file *f = fget(fd); + if (!f) + return ERR_PTR(-EINVAL); + + if (f->f_op == &ion_fops) { + client = f->private_data; + ion_client_get(client); + } + + fput(f); + return client; +} diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c new file mode 100644 index 000000000000..606adae13f48 --- /dev/null +++ b/drivers/gpu/ion/ion_carveout_heap.c @@ -0,0 +1,162 @@ +/* + * drivers/gpu/ion/ion_carveout_heap.c + * + * Copyright (C) 2011 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/spinlock.h> + +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/ion.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion_priv.h" + +#include <asm/mach/map.h> + +struct ion_carveout_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; +}; + +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, + unsigned long size, + unsigned long align) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); + + if (!offset) + return ION_CARVEOUT_ALLOCATE_FAIL; + + return offset; +} + +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + if (addr == ION_CARVEOUT_ALLOCATE_FAIL) + return; + gen_pool_free(carveout_heap->pool, addr, size); +} + +static int ion_carveout_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + *addr = buffer->priv_phys; + *len = buffer->size; + return 0; +} + +static int ion_carveout_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + buffer->priv_phys = ion_carveout_allocate(heap, size, align); + return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? 
-ENOMEM : 0; +} + +static void ion_carveout_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + + ion_carveout_free(heap, buffer->priv_phys, buffer->size); + buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL; +} + +struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return ERR_PTR(-EINVAL); +} + +void ion_carveout_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +void *ion_carveout_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return __arch_ioremap(buffer->priv_phys, buffer->size, + MT_MEMORY_NONCACHED); +} + +void ion_carveout_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + __arch_iounmap(buffer->vaddr); + buffer->vaddr = NULL; + return; +} + +int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + return remap_pfn_range(vma, vma->vm_start, + __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff, + buffer->size, + pgprot_noncached(vma->vm_page_prot)); +} + +static struct ion_heap_ops carveout_heap_ops = { + .allocate = ion_carveout_heap_allocate, + .free = ion_carveout_heap_free, + .phys = ion_carveout_heap_phys, + .map_user = ion_carveout_heap_map_user, + .map_kernel = ion_carveout_heap_map_kernel, + .unmap_kernel = ion_carveout_heap_unmap_kernel, +}; + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_carveout_heap *carveout_heap; + + carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); + if (!carveout_heap) + return ERR_PTR(-ENOMEM); + + carveout_heap->pool = gen_pool_create(12, -1); + if (!carveout_heap->pool) { + kfree(carveout_heap); + return ERR_PTR(-ENOMEM); + } + carveout_heap->base = heap_data->base; + gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, + -1); + carveout_heap->heap.ops = &carveout_heap_ops; + carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; + + return &carveout_heap->heap; +} + +void ion_carveout_heap_destroy(struct ion_heap *heap) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + gen_pool_destroy(carveout_heap->pool); + kfree(carveout_heap); + carveout_heap = NULL; +} diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c new file mode 100644 index 000000000000..6d09778745c6 --- /dev/null +++ b/drivers/gpu/ion/ion_heap.c @@ -0,0 +1,78 @@ +/* + * drivers/gpu/ion/ion_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/ion.h> +#include "ion_priv.h" + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_heap *heap = NULL; + + switch (heap_data->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + heap = ion_system_contig_heap_create(heap_data); + break; + case ION_HEAP_TYPE_SYSTEM: + heap = ion_system_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CARVEOUT: + heap = ion_carveout_heap_create(heap_data); + break; + case ION_HEAP_TYPE_IOMMU: + heap = ion_iommu_heap_create(heap_data); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap_data->type); + return ERR_PTR(-EINVAL); + } + + if (IS_ERR_OR_NULL(heap)) { + pr_err("%s: error creating heap %s type %d base %lu size %u\n", + __func__, heap_data->name, heap_data->type, + heap_data->base, heap_data->size); + return ERR_PTR(-EINVAL); + } + + heap->name = heap_data->name; + heap->id = heap_data->id; + return heap; +} + +void ion_heap_destroy(struct ion_heap *heap) +{ + if (!heap) + return; + + switch (heap->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + ion_system_contig_heap_destroy(heap); + break; + case ION_HEAP_TYPE_SYSTEM: + ion_system_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CARVEOUT: + ion_carveout_heap_destroy(heap); + break; + case ION_HEAP_TYPE_IOMMU: + ion_iommu_heap_destroy(heap); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap->type); + } +} diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c new file mode 100644 index 000000000000..f0246cb39fae --- /dev/null +++ b/drivers/gpu/ion/ion_iommu_heap.c @@ -0,0 +1,350 @@ +/* + * drivers/gpu/ion/ion_iommu_heap.c + * + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#define pr_fmt(fmt) "%s(): " fmt, __func__ + +#include <linux/spinlock.h> +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/ion.h> +#include <linux/mm.h> +#include <linux/platform_device.h> +#include <linux/tegra_ion.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/iommu.h> +#include <linux/highmem.h> + +#include <asm/page.h> +#include <asm/cacheflush.h> + +#include "ion_priv.h" + +#define NUM_PAGES(buf) (PAGE_ALIGN((buf)->size) >> PAGE_SHIFT) + +#define GFP_ION (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN) + +struct ion_iommu_heap { + struct ion_heap heap; + struct gen_pool *pool; + struct iommu_domain *domain; + struct device *dev; +}; + +static struct scatterlist *iommu_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buf) +{ + struct ion_iommu_heap *h = + container_of(heap, struct ion_iommu_heap, heap); + int err, npages = NUM_PAGES(buf); + unsigned int i; + struct scatterlist *sg; + unsigned long da = (unsigned long)buf->priv_virt; + + for_each_sg(buf->sglist, sg, npages, i) { + phys_addr_t pa; + + pa = sg_phys(sg); + BUG_ON(!ALIGN(sg->length, PAGE_SIZE)); + err = iommu_map(h->domain, da, pa, 0, 0); + if (err) + goto err_out; + + sg->dma_address = da; + da += PAGE_SIZE; + } + + pr_debug("da:%p pa:%08x va:%p\n", + buf->priv_virt, sg_phys(buf->sglist), buf->vaddr); + + return buf->sglist; + +err_out: + if (i-- > 0) { + unsigned int j; + for_each_sg(buf->sglist, sg, i, j) + iommu_unmap(h->domain, sg_dma_address(sg), 0); + } + return ERR_PTR(err); +} + +static void iommu_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buf) +{ + struct ion_iommu_heap *h = + container_of(heap, struct ion_iommu_heap, heap); + unsigned int i; + struct scatterlist *sg; + int npages = NUM_PAGES(buf); + + for_each_sg(buf->sglist, sg, npages, i) + iommu_unmap(h->domain, sg_dma_address(sg), 0); + + pr_debug("da:%p\n", buf->priv_virt); +} + + +static int ion_buffer_allocate(struct ion_buffer *buf) +{ + int i, npages = NUM_PAGES(buf); + + buf->pages = kmalloc(npages * sizeof(*buf->pages), GFP_KERNEL); + if (!buf->pages) + goto err_pages; + + buf->sglist = vmalloc(npages * sizeof(*buf->sglist)); + if (!buf->sglist) + goto err_sgl; + + memset(buf->sglist, 0, npages * sizeof(*buf->sglist)); + sg_init_table(buf->sglist, npages); + + for (i = 0; i < npages; i++) { + struct page *page; + phys_addr_t pa; + + page = alloc_page(GFP_ION); + if (!page) + goto err_pgalloc; + pa = page_to_phys(page); + + sg_set_page(&buf->sglist[i], page, PAGE_SIZE, 0); + + flush_dcache_page(page); + outer_flush_range(pa, pa + PAGE_SIZE); + + buf->pages[i] = page; + + pr_debug_once("pa:%08x\n", pa); + } + return 0; + +err_pgalloc: + while (i-- > 0) + __free_page(buf->pages[i]); + vfree(buf->sglist); +err_sgl: + kfree(buf->pages); +err_pages: + return -ENOMEM; +} + +static void ion_buffer_free(struct ion_buffer *buf) +{ + int i, npages = NUM_PAGES(buf); + + for (i = 0; i < npages; i++) + __free_page(buf->pages[i]); + vfree(buf->sglist); + kfree(buf->pages); +} + +static int iommu_heap_allocate(struct ion_heap *heap, struct ion_buffer *buf, + unsigned long len, unsigned long align, + unsigned long flags) +{ + int err; + struct ion_iommu_heap *h = + container_of(heap, struct ion_iommu_heap, heap); + unsigned long da; + struct scatterlist *sgl; + + len = round_up(len, PAGE_SIZE); + + da = gen_pool_alloc(h->pool, len); + if (!da) { + buf->priv_virt = (void *)ION_CARVEOUT_ALLOCATE_FAIL; + return -ENOMEM; + } + buf->priv_virt = 
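/* annotation: the gen_pool offset doubles as the buffer's IOMMU device
 * address (da); iommu_heap_map_dma() later maps each page starting here */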
(void *)da; + buf->size = len; + + WARN_ON(!IS_ALIGNED(da, PAGE_SIZE)); + + err = ion_buffer_allocate(buf); + if (err) + goto err_alloc_buf; + + sgl = iommu_heap_map_dma(heap, buf); + if (IS_ERR_OR_NULL(sgl)) + goto err_heap_map_dma; + buf->vaddr = 0; + return 0; + +err_heap_map_dma: + ion_buffer_free(buf); +err_alloc_buf: + gen_pool_free(h->pool, da, len); + buf->size = 0; + buf->pages = NULL; + buf->priv_virt = NULL; + return err; +} + +static void iommu_heap_free(struct ion_buffer *buf) +{ + struct ion_heap *heap = buf->heap; + struct ion_iommu_heap *h = + container_of(heap, struct ion_iommu_heap, heap); + void *da = buf->priv_virt; + + /* + * FIXME: + * Buf should not be in use. + * Forcibly remove iommu mappings, if any exists. + * Free physical pages here. + */ + + if (da == (void *)ION_CARVEOUT_ALLOCATE_FAIL) + return; + + iommu_heap_unmap_dma(heap, buf); + ion_buffer_free(buf); + gen_pool_free(h->pool, (unsigned long)da, buf->size); + + buf->pages = NULL; + buf->priv_virt = NULL; + pr_debug("da:%p\n", da); +} + +static int iommu_heap_phys(struct ion_heap *heap, struct ion_buffer *buf, + ion_phys_addr_t *addr, size_t *len) +{ + *addr = (unsigned long)buf->priv_virt; + *len = buf->size; + pr_debug("da:%08lx(%x)\n", *addr, *len); + return 0; +} + +static void *iommu_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buf) +{ + int npages = NUM_PAGES(buf); + + BUG_ON(!buf->pages); + buf->vaddr = vm_map_ram(buf->pages, npages, -1, + pgprot_noncached(pgprot_kernel)); + pr_debug("va:%p\n", buf->vaddr); + WARN_ON(!buf->vaddr); + return buf->vaddr; +} + +static void iommu_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buf) +{ + int npages = NUM_PAGES(buf); + + BUG_ON(!buf->pages); + WARN_ON(!buf->vaddr); + vm_unmap_ram(buf->vaddr, npages); + buf->vaddr = NULL; + pr_debug("va:%p\n", buf->vaddr); +} + +static int iommu_heap_map_user(struct ion_heap *mapper, + struct ion_buffer *buf, + struct vm_area_struct *vma) +{ + int i = vma->vm_pgoff >> PAGE_SHIFT; + unsigned long uaddr = vma->vm_start; + unsigned long usize = vma->vm_end - vma->vm_start; + + pr_debug("vma:%08lx-%08lx\n", vma->vm_start, vma->vm_end); + BUG_ON(!buf->pages); + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + do { + int ret; + struct page *page = buf->pages[i++]; + + ret = vm_insert_page(vma, uaddr, page); + if (ret) + return ret; + + uaddr += PAGE_SIZE; + usize -= PAGE_SIZE; + } while (usize > 0); + + return 0; +} + +static struct ion_heap_ops iommu_heap_ops = { + .allocate = iommu_heap_allocate, + .free = iommu_heap_free, + .phys = iommu_heap_phys, + .map_dma = iommu_heap_map_dma, + .unmap_dma = iommu_heap_unmap_dma, + .map_kernel = iommu_heap_map_kernel, + .unmap_kernel = iommu_heap_unmap_kernel, + .map_user = iommu_heap_map_user, +}; + +struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *data) +{ + struct ion_iommu_heap *h; + int err; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) { + err = -ENOMEM; + goto err_heap; + } + + h->pool = gen_pool_create(12, -1); + if (!h->pool) { + err = -ENOMEM; + goto err_genpool; + } + gen_pool_add(h->pool, data->base, data->size, -1); + + h->heap.ops = &iommu_heap_ops; + h->domain = iommu_domain_alloc(); + h->dev = data->priv; + if (!h->domain) { + err = -ENOMEM; + goto err_iommu_alloc; + } + + err = iommu_attach_device(h->domain, h->dev); + if (err) + goto err_iommu_attach; + + return &h->heap; + +err_iommu_attach: + iommu_domain_free(h->domain); +err_iommu_alloc: + gen_pool_destroy(h->pool); +err_genpool: + kfree(h); +err_heap: + 
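/*
 * Illustrative platform data for this heap (field values hypothetical):
 * base/size describe the IOVA window handed to the gen_pool above, and
 * priv carries the struct device whose IOMMU domain backs the heap:
 *
 *	struct ion_platform_heap iommu_heap_data = {
 *		.type = ION_HEAP_TYPE_IOMMU,
 *		.id   = 1,
 *		.name = "iommu",
 *		.base = 0x80000000,
 *		.size = SZ_128M,
 *		.priv = &client_dev,
 *	};
 */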
return ERR_PTR(err); +} + +void ion_iommu_heap_destroy(struct ion_heap *heap) +{ + struct ion_iommu_heap *h = + container_of(heap, struct ion_iommu_heap, heap); + + iommu_detach_device(h->domain, h->dev); + gen_pool_destroy(h->pool); + iommu_domain_free(h->domain); + kfree(h); +} diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h new file mode 100644 index 000000000000..c8415b888e87 --- /dev/null +++ b/drivers/gpu/ion/ion_priv.h @@ -0,0 +1,289 @@ +/* + * drivers/gpu/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _ION_PRIV_H +#define _ION_PRIV_H + +#include <linux/kref.h> +#include <linux/mm_types.h> +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/ion.h> +#include <linux/miscdevice.h> + +struct ion_mapping; + +struct ion_dma_mapping { + struct kref ref; + struct scatterlist *sglist; +}; + +struct ion_kernel_mapping { + struct kref ref; + void *vaddr; +}; + +/** + * struct ion_device - the metadata of the ion device node + * @dev: the actual misc device + * @buffers: an rb tree of all the existing buffers + * @lock: lock protecting the buffers & heaps trees + * @heaps: list of all the heaps in the system + * @user_clients: list of all the clients created from userspace + */ +struct ion_device { + struct miscdevice dev; + struct rb_root buffers; + struct mutex lock; + struct rb_root heaps; + long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, + unsigned long arg); + struct rb_root user_clients; + struct rb_root kernel_clients; + struct dentry *debug_root; +}; + +/** + * struct ion_client - a process/hw block local address space + * @ref: for reference counting the client + * @node: node in the tree of all clients + * @dev: backpointer to ion device + * @handles: an rb tree of all the handles in this client + * @lock: lock protecting the tree of handles + * @heap_mask: mask of all supported heaps + * @name: used for debugging + * @task: used for debugging + * + * A client represents a list of buffers this client may access. + * The mutex stored here is used to protect both handles tree + * as well as the handles themselves, and should be held while modifying either. + */ +struct ion_client { + struct kref ref; + struct rb_node node; + struct ion_device *dev; + struct rb_root handles; + struct mutex lock; + unsigned int heap_mask; + const char *name; + struct task_struct *task; + pid_t pid; + struct dentry *debug_root; +}; + +/** + * ion_handle - a client local reference to a buffer + * @ref: reference count + * @client: back pointer to the client the buffer resides in + * @buffer: pointer to the buffer + * @node: node in the client's handle rbtree + * @kmap_cnt: count of times this client has mapped to kernel + * @dmap_cnt: count of times this client has mapped for dma + * @usermap_cnt: count of times this client has mapped for userspace + * + * Modifications to node, map_cnt or mapping should be protected by the + * lock in the client. Other fields are never changed after initialization. 
+ */
+struct ion_handle {
+	struct kref ref;
+	struct ion_client *client;
+	struct ion_buffer *buffer;
+	struct rb_node node;
+	unsigned int kmap_cnt;
+	unsigned int dmap_cnt;
+	unsigned int usermap_cnt;
+};
+
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle);
+
+void ion_buffer_get(struct ion_buffer *buffer);
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+struct ion_client *ion_client_get_file(int fd);
+
+void ion_client_get(struct ion_client *client);
+
+int ion_client_put(struct ion_client *client);
+
+void ion_handle_get(struct ion_handle *handle);
+
+int ion_handle_put(struct ion_handle *handle);
+
+struct ion_handle *ion_handle_create(struct ion_client *client,
+				     struct ion_buffer *buffer);
+
+void ion_handle_add(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref:		reference count
+ * @node:		node in the ion_device buffers tree
+ * @dev:		back pointer to the ion_device
+ * @heap:		back pointer to the heap the buffer came from
+ * @flags:		buffer specific flags
+ * @size:		size of the buffer
+ * @priv_virt:		private data to the buffer representable as
+ *			a void *
+ * @priv_phys:		private data to the buffer representable as
+ *			an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock:		protects the buffer's cnt fields
+ * @kmap_cnt:		number of times the buffer is mapped to the kernel
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt:		number of times the buffer is mapped for dma
+ * @sglist:		the scatterlist for the buffer if dmap_cnt is not zero
+ * @pages:		list of allocated pages for the buffer
+ */
+struct ion_buffer {
+	struct kref ref;
+	struct rb_node node;
+	struct ion_device *dev;
+	struct ion_heap *heap;
+	unsigned long flags;
+	size_t size;
+	union {
+		void *priv_virt;
+		ion_phys_addr_t priv_phys;
+	};
+	struct mutex lock;
+	int kmap_cnt;
+	void *vaddr;
+	int dmap_cnt;
+	struct scatterlist *sglist;
+	struct page **pages;
+};
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate:		allocate memory
+ * @free:		free memory
+ * @phys:		get physical address of a buffer (only defined on
+ *			physically contiguous heaps)
+ * @map_dma:		map the memory for dma to a scatterlist
+ * @unmap_dma:		unmap the memory for dma
+ * @map_kernel:		map memory to the kernel
+ * @unmap_kernel:	unmap memory to the kernel
+ * @map_user:		map memory to userspace
+ */
+struct ion_heap_ops {
+	int (*allocate) (struct ion_heap *heap,
+			 struct ion_buffer *buffer, unsigned long len,
+			 unsigned long align, unsigned long flags);
+	void (*free) (struct ion_buffer *buffer);
+	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+		     ion_phys_addr_t *addr, size_t *len);
+	struct scatterlist *(*map_dma) (struct ion_heap *heap,
+					struct ion_buffer *buffer);
+	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+			 struct vm_area_struct *vma);
+};
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node:		rb node to put the heap on the device's tree of heaps
+ * @dev:		back pointer to the ion_device
+ * @type:		type of heap
+ * @ops:		ops struct as above
+ * @id:			id of heap, also indicates priority of this heap when
+ *			allocating.
+ *			These are specified by platform data and
+ *			MUST be unique
+ * @name:		used for debugging
+ *
+ * Represents a pool of memory from which buffers can be made.  In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+	struct rb_node node;
+	struct ion_device *dev;
+	enum ion_heap_type type;
+	struct ion_heap_ops *ops;
+	int id;
+	const char *name;
+};
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl:	arch specific ioctl function if applicable
+ *
+ * returns a valid device or an ERR_PTR on failure
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg));
+
+/**
+ * ion_device_destroy - free a device and its resources
+ * @dev:		the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev:		the device
+ * @heap:		the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+				      unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size);
+#ifdef CONFIG_ION_IOMMU
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
+void ion_iommu_heap_destroy(struct ion_heap *);
+#else
+static inline struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap)
+{
+	return NULL;
+}
+static inline void ion_iommu_heap_destroy(struct ion_heap *heap)
+{
+}
+#endif
+/**
+ * The carveout heap returns physical addresses, since 0 may be a valid
+ * physical address, this is used to indicate allocation failed
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
new file mode 100644
index 000000000000..c046cf1a3219
--- /dev/null
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -0,0 +1,198 @@
+/*
+ * drivers/gpu/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * + */ + +#include <linux/err.h> +#include <linux/ion.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion_priv.h" + +static int ion_system_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + buffer->priv_virt = vmalloc_user(size); + if (!buffer->priv_virt) + return -ENOMEM; + return 0; +} + +void ion_system_heap_free(struct ion_buffer *buffer) +{ + vfree(buffer->priv_virt); +} + +struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sglist; + struct page *page; + int i; + int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + void *vaddr = buffer->priv_virt; + + sglist = vmalloc(npages * sizeof(struct scatterlist)); + if (!sglist) + return ERR_PTR(-ENOMEM); + memset(sglist, 0, npages * sizeof(struct scatterlist)); + sg_init_table(sglist, npages); + for (i = 0; i < npages; i++) { + page = vmalloc_to_page(vaddr); + if (!page) + goto end; + sg_set_page(&sglist[i], page, PAGE_SIZE, 0); + vaddr += PAGE_SIZE; + } + /* XXX do cache maintenance for dma? */ + return sglist; +end: + vfree(sglist); + return NULL; +} + +void ion_system_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + /* XXX undo cache maintenance for dma? */ + if (buffer->sglist) + vfree(buffer->sglist); +} + +void *ion_system_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +void ion_system_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff); +} + +static struct ion_heap_ops vmalloc_ops = { + .allocate = ion_system_heap_allocate, + .free = ion_system_heap_free, + .map_dma = ion_system_heap_map_dma, + .unmap_dma = ion_system_heap_unmap_dma, + .map_kernel = ion_system_heap_map_kernel, + .unmap_kernel = ion_system_heap_unmap_kernel, + .map_user = ion_system_heap_map_user, +}; + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &vmalloc_ops; + heap->type = ION_HEAP_TYPE_SYSTEM; + return heap; +} + +void ion_system_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} + +static int ion_system_contig_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + buffer->priv_virt = kzalloc(len, GFP_KERNEL); + if (!buffer->priv_virt) + return -ENOMEM; + return 0; +} + +void ion_system_contig_heap_free(struct ion_buffer *buffer) +{ + kfree(buffer->priv_virt); +} + +static int ion_system_contig_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + *addr = virt_to_phys(buffer->priv_virt); + *len = buffer->size; + return 0; +} + +struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sglist; + + sglist = vmalloc(sizeof(struct scatterlist)); + if (!sglist) + return ERR_PTR(-ENOMEM); + sg_init_table(sglist, 1); + sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0); + return sglist; +} + +int ion_system_contig_heap_map_user(struct ion_heap *heap, + struct ion_buffer 
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+					   struct ion_buffer *buffer,
+					   unsigned long len,
+					   unsigned long align,
+					   unsigned long flags)
+{
+	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
+	if (!buffer->priv_virt)
+		return -ENOMEM;
+	return 0;
+}
+
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+	kfree(buffer->priv_virt);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+				       struct ion_buffer *buffer,
+				       ion_phys_addr_t *addr, size_t *len)
+{
+	*addr = virt_to_phys(buffer->priv_virt);
+	*len = buffer->size;
+	return 0;
+}
+
+static struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+							  struct ion_buffer *buffer)
+{
+	struct scatterlist *sglist;
+
+	sglist = vmalloc(sizeof(struct scatterlist));
+	if (!sglist)
+		return ERR_PTR(-ENOMEM);
+	sg_init_table(sglist, 1);
+	sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0);
+	return sglist;
+}
+
+static int ion_system_contig_heap_map_user(struct ion_heap *heap,
+					   struct ion_buffer *buffer,
+					   struct vm_area_struct *vma)
+{
+	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
+
+	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+	.allocate = ion_system_contig_heap_allocate,
+	.free = ion_system_contig_heap_free,
+	.phys = ion_system_contig_heap_phys,
+	.map_dma = ion_system_contig_heap_map_dma,
+	.unmap_dma = ion_system_heap_unmap_dma,
+	.map_kernel = ion_system_heap_map_kernel,
+	.unmap_kernel = ion_system_heap_unmap_kernel,
+	.map_user = ion_system_contig_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->ops = &kmalloc_ops;
+	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+	return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
+
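The contiguous heap can implement .phys because kmalloc memory sits in the kernel's linear mapping, where virt_to_phys() is valid; the vmalloc-backed heap above deliberately leaves .phys unset. A short illustration of the distinction (standard kernel helpers; assumes the vmalloc page is present):

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* virt_to_phys() is only meaningful for the linear (kmalloc) mapping;
	 * vmalloc addresses must be resolved page by page. */
	static phys_addr_t example_phys_of(void *vaddr)
	{
		if (is_vmalloc_addr(vaddr))
			return page_to_phys(vmalloc_to_page(vaddr)) +
				offset_in_page(vaddr);
		return virt_to_phys(vaddr);
	}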
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c
new file mode 100644
index 000000000000..692458e07b5e
--- /dev/null
+++ b/drivers/gpu/ion/ion_system_mapper.c
@@ -0,0 +1,114 @@
+/*
+ * drivers/gpu/ion/ion_system_mapper.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+
+/*
+ * This mapper is valid for any heap that allocates memory that already has
+ * a kernel mapping; this includes vmalloc'd memory, kmalloc'd memory,
+ * pages obtained via io_remap, etc.
+ */
+static void *ion_kernel_mapper_map(struct ion_mapper *mapper,
+				   struct ion_buffer *buffer,
+				   struct ion_mapping **mapping)
+{
+	if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
+		pr_err("%s: attempting to map an unsupported heap\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	/* XXX REVISIT ME!!! */
+	*((unsigned long *)mapping) = (unsigned long)buffer->priv;
+	return buffer->priv;
+}
+
+static void ion_kernel_mapper_unmap(struct ion_mapper *mapper,
+				    struct ion_buffer *buffer,
+				    struct ion_mapping *mapping)
+{
+	if (!((1 << buffer->heap->type) & mapper->heap_mask))
+		pr_err("%s: attempting to unmap an unsupported heap\n",
+		       __func__);
+}
+
+static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper,
+					  struct ion_buffer *buffer,
+					  struct ion_mapping *mapping)
+{
+	if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
+		pr_err("%s: attempting to map an unsupported heap\n",
+		       __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	return buffer->priv;
+}
+
+static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
+				      struct ion_buffer *buffer,
+				      struct vm_area_struct *vma,
+				      struct ion_mapping *mapping)
+{
+	int ret;
+
+	switch (buffer->heap->type) {
+	case ION_HEAP_KMALLOC:
+	{
+		unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv));
+
+		ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+				      vma->vm_end - vma->vm_start,
+				      vma->vm_page_prot);
+		break;
+	}
+	case ION_HEAP_VMALLOC:
+		ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);
+		break;
+	default:
+		pr_err("%s: attempting to map unsupported heap to userspace\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct ion_mapper_ops ops = {
+	.map = ion_kernel_mapper_map,
+	.map_kernel = ion_kernel_mapper_map_kernel,
+	.map_user = ion_kernel_mapper_map_user,
+	.unmap = ion_kernel_mapper_unmap,
+};
+
+struct ion_mapper *ion_system_mapper_create(void)
+{
+	struct ion_mapper *mapper;
+
+	mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL);
+	if (!mapper)
+		return ERR_PTR(-ENOMEM);
+	mapper->type = ION_SYSTEM_MAPPER;
+	mapper->ops = &ops;
+	mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC);
+	return mapper;
+}
+
+void ion_system_mapper_destroy(struct ion_mapper *mapper)
+{
+	kfree(mapper);
+}
+
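The mapper accepts a buffer only when the bit for the buffer's heap type is set in heap_mask, the same one-bit-per-type convention used for client heap masks. The test in isolation:

	#include <linux/types.h>

	/* One bit per heap type; a mapper or client opts in by OR-ing bits
	 * into a mask, e.g.
	 *   mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC);
	 */
	static bool example_heap_supported(unsigned int heap_type,
					   unsigned long heap_mask)
	{
		return ((1UL << heap_type) & heap_mask) != 0;
	}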
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile
new file mode 100644
index 000000000000..11cd003fb08f
--- /dev/null
+++ b/drivers/gpu/ion/tegra/Makefile
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
new file mode 100644
index 000000000000..65335d265362
--- /dev/null
+++ b/drivers/gpu/ion/tegra/tegra_ion.c
@@ -0,0 +1,595 @@
+/*
+ * drivers/gpu/ion/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/tegra_ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/io.h>
+#include "../ion_priv.h"
+
+#define CLIENT_HEAP_MASK 0xFFFFFFFF
+#define HEAP_FLAGS 0xFF
+
+#if !defined(CONFIG_TEGRA_NVMAP)
+#include "mach/nvmap.h"
+struct nvmap_device *nvmap_dev;
+#endif
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+static int tegra_ion_pin(struct ion_client *client,
+			 unsigned int cmd,
+			 unsigned long arg)
+{
+	struct tegra_ion_pin_data data;
+	int ret = 0;	/* must be initialized: the unpin path reaches err: without setting it */
+	struct ion_handle *on_stack[16];
+	struct ion_handle **refs = on_stack;
+	int i;
+	bool valid_handle;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+	if (data.count) {
+		size_t bytes = data.count * sizeof(struct ion_handle *);
+
+		if (data.count > ARRAY_SIZE(on_stack))
+			refs = kmalloc(bytes, GFP_KERNEL);
+		if (!refs)
+			return -ENOMEM;
+		if (copy_from_user(refs, (void __user *)data.handles, bytes)) {
+			ret = -EFAULT;
+			goto err;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	mutex_lock(&client->lock);
+	for (i = 0; i < data.count; i++) {
+		/* Ignore NULL pointers during unpin operation. */
+		if (!refs[i] && cmd == TEGRA_ION_UNPIN)
+			continue;
+		valid_handle = ion_handle_validate(client, refs[i]);
+		if (!valid_handle) {
+			WARN(1, "invalid handle passed h=0x%x", (u32)refs[i]);
+			mutex_unlock(&client->lock);
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+	mutex_unlock(&client->lock);
+
+	if (cmd == TEGRA_ION_PIN) {
+		ion_phys_addr_t addr;
+		size_t len;
+
+		for (i = 0; i < data.count; i++) {
+			ret = ion_phys(client, refs[i], &addr, &len);
+			if (ret)
+				goto err;
+			ion_handle_get(refs[i]);
+			ret = put_user(addr, &data.addr[i]);
+			if (ret)
+				goto err;	/* don't leak 'refs' on the early-exit path */
+		}
+	} else if (cmd == TEGRA_ION_UNPIN) {
+		for (i = 0; i < data.count; i++) {
+			if (refs[i])
+				ion_handle_put(refs[i]);
+		}
+	}
+
+err:
+	if (ret) {
+		pr_err("error, ret=0x%x", ret);
+		/* FIXME: undo pinning. */
+	}
+	if (refs != on_stack)
+		kfree(refs);
+	return ret;
+}
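The pin path hands device-visible addresses back through put_user() into a caller-supplied array. A sketch of the caller's payload follows (field usage inferred from the handler above; the real struct lives in linux/tegra_ion.h, and depending on how the core routes custom commands the request may travel via an ION custom-ioctl wrapper rather than a direct ioctl):

	#include <sys/ioctl.h>
	#include <linux/tegra_ion.h>

	/* Illustrative only: pin 'count' handles and read back the addresses
	 * the driver wrote with put_user().  Error handling elided. */
	static int example_pin(int ion_fd, struct ion_handle **handles,
			       unsigned long *addrs, unsigned int count)
	{
		struct tegra_ion_pin_data data;

		data.count = count;
		data.handles = handles;	/* driver-issued handles (assumed field type) */
		data.addr = addrs;	/* filled in by the driver */
		return ioctl(ion_fd, TEGRA_ION_PIN, &data);
	}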
+
+static int tegra_ion_alloc_from_id(struct ion_client *client,
+				   unsigned int cmd,
+				   unsigned long arg)
+{
+	struct tegra_ion_id_data data;
+	struct ion_buffer *buffer;
+	struct tegra_ion_id_data __user *user_data =
+		(struct tegra_ion_id_data __user *)arg;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+	/* the id is the ion_buffer pointer handed out by tegra_ion_get_id */
+	buffer = (struct ion_buffer *)data.id;
+	data.handle = ion_import(client, buffer);
+	data.size = buffer->size;
+	if (put_user(data.handle, &user_data->handle))
+		return -EFAULT;
+	if (put_user(data.size, &user_data->size))
+		return -EFAULT;
+	return 0;
+}
+
+static int tegra_ion_get_id(struct ion_client *client,
+			    unsigned int cmd,
+			    unsigned long arg)
+{
+	bool valid_handle;
+	struct tegra_ion_id_data data;
+	struct tegra_ion_id_data __user *user_data =
+		(struct tegra_ion_id_data __user *)arg;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, data.handle);
+	mutex_unlock(&client->lock);
+
+	if (!valid_handle) {
+		WARN(1, "invalid handle passed\n");
+		return -EINVAL;
+	}
+
+	pr_debug("h=0x%x, b=0x%x, bref=%d",
+		 (u32)data.handle, (u32)data.handle->buffer,
+		 atomic_read(&data.handle->buffer->ref.refcount));
+	if (put_user((unsigned long)ion_handle_buffer(data.handle),
+		     &user_data->id))
+		return -EFAULT;
+	return 0;
+}
+
+static int tegra_ion_cache_maint(struct ion_client *client,
+				 unsigned int cmd,
+				 unsigned long arg)
+{
+	wmb();
+	return 0;
+}
+
+static int tegra_ion_rw(struct ion_client *client,
+			unsigned int cmd,
+			unsigned long arg)
+{
+	bool valid_handle;
+	struct tegra_ion_rw_data data;
+	char *kern_addr, *src;
+	int ret = 0;
+	size_t copied = 0;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+
+	if (!data.handle || !data.addr || !data.count || !data.elem_size)
+		return -EINVAL;
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, data.handle);
+	mutex_unlock(&client->lock);
+
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to read/write.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (data.elem_size == data.mem_stride &&
+	    data.elem_size == data.user_stride) {
+		/* contiguous on both sides: collapse into one big element */
+		data.elem_size *= data.count;
+		data.mem_stride = data.elem_size;
+		data.user_stride = data.elem_size;
+		data.count = 1;
+	}
+
+	kern_addr = ion_map_kernel(client, data.handle);
+
+	while (data.count--) {
+		if (data.offset + data.elem_size > data.handle->buffer->size) {
+			WARN(1, "read/write outside of handle\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		src = kern_addr + data.offset;
+		if (cmd == TEGRA_ION_READ)
+			ret = copy_to_user((void __user *)data.addr,
+					   src, data.elem_size);
+		else
+			ret = copy_from_user(src, (void __user *)data.addr,
					     data.elem_size);
+
+		if (ret) {
+			/* copy_*_user return bytes not copied, not -errno */
+			ret = -EFAULT;
+			break;
+		}
+
+		copied += data.elem_size;
+		data.addr += data.user_stride;
+		data.offset += data.mem_stride;
+	}
+
+	ion_unmap_kernel(client, data.handle);
+	return ret;
+}
+
+static int tegra_ion_get_param(struct ion_client *client,
+			       unsigned int cmd,
+			       unsigned long arg)
+{
+	bool valid_handle;
+	struct tegra_ion_get_params_data data;
+	struct tegra_ion_get_params_data __user *user_data =
+		(struct tegra_ion_get_params_data __user *)arg;
+	struct ion_buffer *buffer;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, data.handle);
+	mutex_unlock(&client->lock);
+
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to get params.\n",
+		     __func__);
+		return -EINVAL;
+	}
+
+	buffer = ion_handle_buffer(data.handle);
+	data.align = 4096;
+	data.heap = 1;
+	ion_phys(client, data.handle, &data.addr, &data.size);
+
+	if (copy_to_user(user_data, &data, sizeof(data)))
+		return -EFAULT;
+
+	return 0;
+}
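tegra_ion_rw() supports strided copies; when the element size equals both strides, the data is contiguous on both sides and the loop above collapses everything into a single copy. The collapse in isolation (hypothetical standalone form):

	#include <linux/types.h>

	/* 8 elements of 16 bytes, tightly packed on both sides:
	 *   count=8, elem_size=16, mem_stride=16, user_stride=16
	 * collapses to one 128-byte copy:
	 *   count=1, elem_size=128, mem_stride=128, user_stride=128
	 */
	static void example_collapse(size_t *count, size_t *elem_size,
				     size_t *mem_stride, size_t *user_stride)
	{
		if (*elem_size == *mem_stride && *elem_size == *user_stride) {
			*elem_size *= *count;
			*mem_stride = *elem_size;
			*user_stride = *elem_size;
			*count = 1;
		}
	}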
+
+static long tegra_ion_ioctl(struct ion_client *client,
+			    unsigned int cmd,
+			    unsigned long arg)
+{
+	int ret = -ENOTTY;
+
+	switch (cmd) {
+	case TEGRA_ION_ALLOC_FROM_ID:
+		ret = tegra_ion_alloc_from_id(client, cmd, arg);
+		break;
+	case TEGRA_ION_GET_ID:
+		ret = tegra_ion_get_id(client, cmd, arg);
+		break;
+	case TEGRA_ION_PIN:
+	case TEGRA_ION_UNPIN:
+		ret = tegra_ion_pin(client, cmd, arg);
+		break;
+	case TEGRA_ION_CACHE_MAINT:
+		ret = tegra_ion_cache_maint(client, cmd, arg);
+		break;
+	case TEGRA_ION_READ:
+	case TEGRA_ION_WRITE:
+		ret = tegra_ion_rw(client, cmd, arg);
+		break;
+	case TEGRA_ION_GET_PARAM:
+		ret = tegra_ion_get_param(client, cmd, arg);
+		break;
+	default:
+		WARN(1, "Unknown custom ioctl\n");
+		return -ENOTTY;
+	}
+	return ret;
+}
+
+static int tegra_ion_probe(struct platform_device *pdev)
+{
+	struct ion_platform_data *pdata = pdev->dev.platform_data;
+	int i;
+
+	if (!pdata)
+		return -EINVAL;
+
+	num_heaps = pdata->nr;
+
+	heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+	if (!heaps)
+		return -ENOMEM;
+
+	idev = ion_device_create(tegra_ion_ioctl);
+	if (IS_ERR_OR_NULL(idev)) {
+		kfree(heaps);
+		return idev ? PTR_ERR(idev) : -ENOMEM;
+	}
+
+	/* create the heaps as specified in the board file */
+	for (i = 0; i < num_heaps; i++) {
+		struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+		heaps[i] = ion_heap_create(heap_data);
+		if (IS_ERR_OR_NULL(heaps[i])) {
+			pr_warn("%s(type:%d id:%d) isn't supported\n",
+				heap_data->name,
+				heap_data->type, heap_data->id);
+			continue;
+		}
+		ion_device_add_heap(idev, heaps[i]);
+	}
+	platform_set_drvdata(pdev, idev);
+#if !defined(CONFIG_TEGRA_NVMAP)
+	nvmap_dev = (struct nvmap_device *)idev;
+#endif
+	return 0;
+}
+
+static int tegra_ion_remove(struct platform_device *pdev)
+{
+	struct ion_device *idev = platform_get_drvdata(pdev);
+	int i;
+
+	ion_device_destroy(idev);
+	for (i = 0; i < num_heaps; i++)
+		ion_heap_destroy(heaps[i]);
+	kfree(heaps);
+	return 0;
+}
+
+static struct platform_driver ion_driver = {
+	.probe = tegra_ion_probe,
+	.remove = tegra_ion_remove,
+	.driver = { .name = "ion-tegra" }
+};
+
+static int __init ion_init(void)
+{
+	return platform_driver_register(&ion_driver);
+}
+
+static void __exit ion_exit(void)
+{
+	platform_driver_unregister(&ion_driver);
+}
+
+fs_initcall(ion_init);
+module_exit(ion_exit);
+
+#if !defined(CONFIG_TEGRA_NVMAP)
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+					 const char *name)
+{
+	return ion_client_create(dev, CLIENT_HEAP_MASK, name);
+}
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+				     size_t align, unsigned int flags)
+{
+	return ion_alloc(client, size, align, HEAP_FLAGS);
+}
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+	ion_free(client, r);
+}
+
+void *nvmap_mmap(struct nvmap_handle_ref *r)
+{
+	return ion_map_kernel(r->client, r);
+}
+
+void nvmap_munmap(struct nvmap_handle_ref *r, void *addr)
+{
+	ion_unmap_kernel(r->client, r);
+}
+
+struct nvmap_client *nvmap_client_get_file(int fd)
+{
+	return ion_client_get_file(fd);
+}
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
+{
+	ion_client_get(client);
+	return client;
+}
+
+void nvmap_client_put(struct nvmap_client *c)
+{
+	ion_client_put(c);
+}
+
+phys_addr_t nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r)
+{
+	ion_phys_addr_t addr;
+	size_t len;
+
+	ion_handle_get(r);
+	ion_phys(c, r, &addr, &len);
+	wmb();
+	return addr;
+}
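The probe path consumes ion_platform_data from the board file. A sketch of such a registration (hypothetical heap table; this assumes a pointer-style heaps member, whereas some versions of the header declare it as a flexible array laid out inline):

	#include <linux/kernel.h>
	#include <linux/ion.h>
	#include <linux/platform_device.h>

	/* Hypothetical board-file fragment: one system heap, one carveout. */
	static struct ion_platform_heap example_heaps[] = {
		{ .type = ION_HEAP_TYPE_SYSTEM, .id = 0, .name = "system" },
		{ .type = ION_HEAP_TYPE_CARVEOUT, .id = 1, .name = "carveout" },
	};

	static struct ion_platform_data example_ion_pdata = {
		.nr = ARRAY_SIZE(example_heaps),
		.heaps = example_heaps,
	};

	static struct platform_device example_ion_device = {
		.name = "ion-tegra",	/* matches ion_driver above */
		.id = -1,
		.dev = { .platform_data = &example_ion_pdata },
	};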
+
+phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
+{
+	struct ion_handle *handle;
+	ion_phys_addr_t addr;
+	size_t len;
+
+	handle = nvmap_convert_handle_u2k(id);
+	ion_phys(c, handle, &addr, &len);
+	return addr;
+}
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+	if (r)
+		ion_handle_put(r);
+}
+
+static int nvmap_reloc_pin_array(struct ion_client *client,
+				 const struct nvmap_pinarray_elem *arr,
+				 int nr, struct ion_handle *gather)
+{
+	struct ion_handle *last_patch = NULL;
+	void *patch_addr;
+	ion_phys_addr_t pin_addr;
+	size_t len;
+	int i;
+
+	for (i = 0; i < nr; i++) {
+		struct ion_handle *patch;
+		struct ion_handle *pin;
+		ion_phys_addr_t reloc_addr;
+
+		/* all of the handles are validated and get'ted prior to
+		 * calling this function, so casting is safe here */
+		pin = (struct ion_handle *)arr[i].pin_mem;
+
+		if (arr[i].patch_mem == (unsigned long)last_patch) {
+			patch = last_patch;
+		} else if (arr[i].patch_mem == (unsigned long)gather) {
+			patch = gather;
+		} else {
+			if (last_patch)
+				ion_handle_put(last_patch);
+
+			/* check for NULL before taking a reference */
+			patch = (struct ion_handle *)arr[i].patch_mem;
+			if (!patch)
+				return -EPERM;
+			ion_handle_get(patch);
+			last_patch = patch;
+		}
+
+		patch_addr = ion_map_kernel(client, patch);
+		patch_addr = patch_addr + arr[i].patch_offset;
+
+		ion_phys(client, pin, &pin_addr, &len);
+		reloc_addr = pin_addr + arr[i].pin_offset;
+		__raw_writel(reloc_addr, patch_addr);
+		ion_unmap_kernel(client, patch);
+	}
+
+	if (last_patch)
+		ion_handle_put(last_patch);
+
+	wmb();
+	return 0;
+}
+
+int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
+		    const struct nvmap_pinarray_elem *arr, int nr,
+		    struct nvmap_handle **unique)
+{
+	int i;
+	int count = 0;
+
+	/* FIXME: take care of duplicate ones & validation. */
+	for (i = 0; i < nr; i++) {
+		unique[i] = (struct nvmap_handle *)arr[i].pin_mem;
+		nvmap_pin(client, (struct nvmap_handle_ref *)unique[i]);
+		count++;
+	}
+	nvmap_reloc_pin_array((struct ion_client *)client,
+			      arr, nr, (struct ion_handle *)gather);
+	return nr;
+}
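A relocation writes a pinned buffer's physical address, plus an offset, into another buffer; this is how command streams get their device pointers fixed up before submission. One relocation in isolation (hypothetical standalone form of the loop body above):

	#include <linux/io.h>
	#include <linux/types.h>

	/* Write (pin_base + pin_offset) into the patch buffer at patch_offset.
	 * 'patch_vaddr' is a kernel mapping of the patch buffer. */
	static void example_reloc(void *patch_vaddr, u32 patch_offset,
				  u32 pin_base, u32 pin_offset)
	{
		__raw_writel(pin_base + pin_offset, patch_vaddr + patch_offset);
	}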
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+			 struct nvmap_handle **h, int nr)
+{
+	int i;
+
+	for (i = 0; i < nr; i++)
+		nvmap_unpin(client, h[i]);
+}
+
+int nvmap_patch_word(struct nvmap_client *client,
+		     struct nvmap_handle *patch,
+		     u32 patch_offset, u32 patch_value)
+{
+	void *vaddr;
+	u32 *patch_addr;
+
+	vaddr = ion_map_kernel(client, patch);
+	patch_addr = vaddr + patch_offset;
+	__raw_writel(patch_value, patch_addr);
+	wmb();
+	ion_unmap_kernel(client, patch);
+	return 0;
+}
+
+struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+					 unsigned long id)
+{
+	struct ion_handle *handle;
+
+	handle = (struct ion_handle *)nvmap_convert_handle_u2k(id);
+	pr_debug("id=0x%x, h=0x%x, c=0x%x",
+		 (u32)id, (u32)handle, (u32)client);
+	nvmap_handle_get(handle);
+	return handle;
+}
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+						   unsigned long id)
+{
+	struct ion_buffer *buffer;
+	struct ion_handle *handle;
+	struct ion_client *ion_client = client;
+
+	handle = (struct ion_handle *)nvmap_convert_handle_u2k(id);
+	pr_debug("id=0x%x, h=0x%x, c=0x%x",
+		 (u32)id, (u32)handle, (u32)client);
+	buffer = handle->buffer;
+
+	handle = ion_handle_create(client, buffer);
+	if (IS_ERR(handle))
+		return handle;
+
+	mutex_lock(&ion_client->lock);
+	ion_handle_add(ion_client, handle);
+	mutex_unlock(&ion_client->lock);
+
+	pr_debug("dup id=0x%x, h=0x%x", (u32)id, (u32)handle);
+	return handle;
+}
+
+void _nvmap_handle_free(struct nvmap_handle *h)
+{
+	ion_handle_put(h);
+}
+
+struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
+	size_t size, size_t align, unsigned int flags, unsigned int iova_start)
+{
+	/* FIXME: */
+	return NULL;
+}
+
+void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+	ion_free(client, r);
+}
+
+struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
+{
+	ion_handle_get(h);
+	return h;
+}
+
+void nvmap_handle_put(struct nvmap_handle *h)
+{
+	ion_handle_put(h);
+}
+
+#endif
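Taken together, an in-kernel client of this stack follows roughly this lifecycle (a minimal sketch using only calls exercised by the shim above, with the same mask and flags values as CLIENT_HEAP_MASK and HEAP_FLAGS; error handling abbreviated):

	#include <linux/err.h>
	#include <linux/ion.h>

	/* Hypothetical in-kernel user: allocate, map, query the physical
	 * address, then release.  'idev' is the device created at probe. */
	static int example_client_roundtrip(struct ion_device *idev)
	{
		struct ion_client *client;
		struct ion_handle *handle;
		ion_phys_addr_t phys;
		size_t len;
		void *vaddr;

		client = ion_client_create(idev, 0xFFFFFFFF, "example");
		if (IS_ERR_OR_NULL(client))
			return -ENOMEM;

		handle = ion_alloc(client, 4096, 4096, 0xFF);
		if (IS_ERR_OR_NULL(handle))
			goto out;

		vaddr = ion_map_kernel(client, handle);	/* CPU access */
		ion_phys(client, handle, &phys, &len);	/* device address */
		ion_unmap_kernel(client, handle);
		ion_free(client, handle);
	out:
		ion_client_put(client);
		return 0;
	}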