author		Ingo Molnar <mingo@elte.hu>	2009-06-17 12:52:15 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-06-17 12:56:49 +0200
commit		eadb8a091b27a840de7450f84ecff5ef13476424 (patch)
tree		58c3782d40def63baa8167f3d31e3048cb4c7660 /drivers/gpu/drm/radeon/radeon_object.c
parent		73874005cd8800440be4299bd095387fff4b90ac (diff)
parent		65795efbd380a832ae508b04dba8f8e53f0b84d9 (diff)
Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
	arch/x86/Kconfig
	arch/x86/kernel/traps.c
	arch/x86/power/cpu.c
	arch/x86/power/cpu_32.c
	kernel/Makefile

Semantic conflict:
	arch/x86/kernel/hw_breakpoint.c

Merge reason: Resolve the conflicts, move from put_cpu_no_sched() to
put_cpu() in arch/x86/kernel/hw_breakpoint.c.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.c	511
1 file changed, 511 insertions, 0 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
new file mode 100644
index 000000000000..983e8df5e000
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * Dave Airlie
+ */
+#include <linux/list.h>
+#include <drm/drmP.h>
+#include "radeon_drm.h"
+#include "radeon.h"
+
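+/*
+ * radeon_object wraps a TTM buffer object together with the radeon
+ * bookkeeping attached to it: the owning device, the optional GEM
+ * object backing it, a cached kernel mapping, a pin count and the GPU
+ * address of its current placement.
+ */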
+struct radeon_object {
+ struct ttm_buffer_object tobj;
+ struct list_head list;
+ struct radeon_device *rdev;
+ struct drm_gem_object *gobj;
+ struct ttm_bo_kmap_obj kmap;
+ unsigned pin_count;
+ uint64_t gpu_addr;
+ void *kptr;
+ bool is_iomem;
+};
+
+int radeon_ttm_init(struct radeon_device *rdev);
+void radeon_ttm_fini(struct radeon_device *rdev);
+
+/*
+ * To exclude concurrent BO access we rely on bo_reserve exclusion; every
+ * function that touches a BO takes the reservation first.
+ */
+
+static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+{
+ return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
+}
+
+static void radeon_object_unreserve(struct radeon_object *robj)
+{
+ ttm_bo_unreserve(&robj->tobj);
+}
+
+static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
+{
+ struct radeon_object *robj;
+
+ robj = container_of(tobj, struct radeon_object, tobj);
+ list_del_init(&robj->list);
+ kfree(robj);
+}
+
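+/*
+ * Derive the GPU address of the object's current placement: the mm_node
+ * start offset (in pages) plus the base of the VRAM or GTT aperture.
+ * Objects without a placement keep the poison address ~0ULL.
+ */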
+static inline void radeon_object_gpu_addr(struct radeon_object *robj)
+{
+ /* Default gpu address */
+ robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
+ if (robj->tobj.mem.mm_node == NULL) {
+ return;
+ }
+ robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
+ switch (robj->tobj.mem.mem_type) {
+ case TTM_PL_VRAM:
+ robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
+ break;
+ case TTM_PL_TT:
+ robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
+ break;
+ default:
+ DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
+ robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
+ return;
+ }
+}
+
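+/* Translate a RADEON_GEM_DOMAIN_* mask into TTM placement flags,
+ * falling back to system memory when no domain bit is set. */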
+static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
+{
+ uint32_t flags = 0;
+ if (domain & RADEON_GEM_DOMAIN_VRAM) {
+ flags |= TTM_PL_FLAG_VRAM;
+ }
+ if (domain & RADEON_GEM_DOMAIN_GTT) {
+ flags |= TTM_PL_FLAG_TT;
+ }
+ if (domain & RADEON_GEM_DOMAIN_CPU) {
+ flags |= TTM_PL_FLAG_SYSTEM;
+ }
+ if (!flags) {
+ flags |= TTM_PL_FLAG_SYSTEM;
+ }
+ return flags;
+}
+
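+/*
+ * Allocate and initialize a radeon_object of @size bytes. @kernel selects
+ * a kernel-internal BO (no user-space mapping), @domain the initial
+ * placement. GEM-backed objects are tracked on rdev->gem.objects.
+ */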
+int radeon_object_create(struct radeon_device *rdev,
+ struct drm_gem_object *gobj,
+ unsigned long size,
+ bool kernel,
+ uint32_t domain,
+ bool interruptible,
+ struct radeon_object **robj_ptr)
+{
+ struct radeon_object *robj;
+ enum ttm_bo_type type;
+ uint32_t flags;
+ int r;
+
+ if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+ }
+ if (kernel) {
+ type = ttm_bo_type_kernel;
+ } else {
+ type = ttm_bo_type_device;
+ }
+ *robj_ptr = NULL;
+ robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
+ if (robj == NULL) {
+ return -ENOMEM;
+ }
+ robj->rdev = rdev;
+ robj->gobj = gobj;
+ INIT_LIST_HEAD(&robj->list);
+
+ flags = radeon_object_flags_from_domain(domain);
+ r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
+ 0, 0, false, NULL, size,
+ &radeon_ttm_object_object_destroy);
+ if (unlikely(r != 0)) {
+ /* TTM calls radeon_ttm_object_object_destroy() if an error happens */
+ DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
+ size, flags, 0);
+ return r;
+ }
+ *robj_ptr = robj;
+ if (gobj) {
+ list_add_tail(&robj->list, &rdev->gem.objects);
+ }
+ return 0;
+}
+
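+/*
+ * Map the object into kernel address space. The mapping is created on
+ * first use and cached in robj->kptr; later calls return the cached
+ * pointer.
+ */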
+int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+{
+ int r;
+
+ spin_lock(&robj->tobj.lock);
+ if (robj->kptr) {
+ if (ptr) {
+ *ptr = robj->kptr;
+ }
+ spin_unlock(&robj->tobj.lock);
+ return 0;
+ }
+ spin_unlock(&robj->tobj.lock);
+ r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+ if (r) {
+ return r;
+ }
+ spin_lock(&robj->tobj.lock);
+ robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
+ spin_unlock(&robj->tobj.lock);
+ if (ptr) {
+ *ptr = robj->kptr;
+ }
+ return 0;
+}
+
+void radeon_object_kunmap(struct radeon_object *robj)
+{
+ spin_lock(&robj->tobj.lock);
+ if (robj->kptr == NULL) {
+ spin_unlock(&robj->tobj.lock);
+ return;
+ }
+ robj->kptr = NULL;
+ spin_unlock(&robj->tobj.lock);
+ ttm_bo_kunmap(&robj->kmap);
+}
+
+void radeon_object_unref(struct radeon_object **robj)
+{
+ struct ttm_buffer_object *tobj;
+
+ if ((*robj) == NULL) {
+ return;
+ }
+ tobj = &((*robj)->tobj);
+ ttm_bo_unref(&tobj);
+ if (tobj == NULL) {
+ *robj = NULL;
+ }
+}
+
+int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
+{
+ *offset = robj->tobj.addr_space_offset;
+ return 0;
+}
+
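+/*
+ * Pin the object into @domain and return its GPU address. Pinning is
+ * reference counted; only the first pin validates the BO with
+ * TTM_PL_FLAG_NO_EVICT. The fbdev BO is kunmapped and remapped around
+ * the move because its kernel mapping may change.
+ */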
+int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
+ uint64_t *gpu_addr)
+{
+ uint32_t flags;
+ uint32_t tmp;
+ void *fbptr;
+ int r;
+
+ flags = radeon_object_flags_from_domain(domain);
+ spin_lock(&robj->tobj.lock);
+ if (robj->pin_count) {
+ robj->pin_count++;
+ if (gpu_addr != NULL) {
+ *gpu_addr = robj->gpu_addr;
+ }
+ spin_unlock(&robj->tobj.lock);
+ return 0;
+ }
+ spin_unlock(&robj->tobj.lock);
+ r = radeon_object_reserve(robj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
+ return r;
+ }
+ if (robj->rdev->fbdev_robj == robj) {
+ mutex_lock(&robj->rdev->fbdev_info->lock);
+ radeon_object_kunmap(robj);
+ }
+ tmp = robj->tobj.mem.placement;
+ ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
+ robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
+ r = ttm_buffer_object_validate(&robj->tobj,
+ robj->tobj.proposed_placement,
+ false, false);
+ radeon_object_gpu_addr(robj);
+ if (gpu_addr != NULL) {
+ *gpu_addr = robj->gpu_addr;
+ }
+ robj->pin_count = 1;
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to pin object.\n");
+ }
+ radeon_object_unreserve(robj);
+ if (robj->rdev->fbdev_robj == robj) {
+ if (!r) {
+ r = radeon_object_kmap(robj, &fbptr);
+ }
+ if (!r) {
+ robj->rdev->fbdev_info->screen_base = fbptr;
+ robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
+ }
+ mutex_unlock(&robj->rdev->fbdev_info->lock);
+ }
+ return r;
+}
+
+void radeon_object_unpin(struct radeon_object *robj)
+{
+ uint32_t flags;
+ void *fbptr;
+ int r;
+
+ spin_lock(&robj->tobj.lock);
+ if (!robj->pin_count) {
+ spin_unlock(&robj->tobj.lock);
+ printk(KERN_WARNING "Unpin not necessary for %p!\n", robj);
+ return;
+ }
+ robj->pin_count--;
+ if (robj->pin_count) {
+ spin_unlock(&robj->tobj.lock);
+ return;
+ }
+ spin_unlock(&robj->tobj.lock);
+ r = radeon_object_reserve(robj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
+ return;
+ }
+ if (robj->rdev->fbdev_robj == robj) {
+ mutex_lock(&robj->rdev->fbdev_info->lock);
+ radeon_object_kunmap(robj);
+ }
+ flags = robj->tobj.mem.placement;
+ robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
+ r = ttm_buffer_object_validate(&robj->tobj,
+ robj->tobj.proposed_placement,
+ false, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to unpin buffer.\n");
+ }
+ radeon_object_unreserve(robj);
+ if (robj->rdev->fbdev_robj == robj) {
+ if (!r) {
+ r = radeon_object_kmap(robj, &fbptr);
+ }
+ if (!r) {
+ robj->rdev->fbdev_info->screen_base = fbptr;
+ robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
+ }
+ mutex_unlock(&robj->rdev->fbdev_info->lock);
+ }
+}
+
+int radeon_object_wait(struct radeon_object *robj)
+{
+ int r = 0;
+
+ /* FIXME: should use block reservation instead */
+ r = radeon_object_reserve(robj, true);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to reserve object for waiting.\n");
+ return r;
+ }
+ spin_lock(&robj->tobj.lock);
+ if (robj->tobj.sync_obj) {
+ r = ttm_bo_wait(&robj->tobj, true, false, false);
+ }
+ spin_unlock(&robj->tobj.lock);
+ radeon_object_unreserve(robj);
+ return r;
+}
+
+int radeon_object_evict_vram(struct radeon_device *rdev)
+{
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* Useless to evict on IGP chips */
+ return 0;
+ }
+ return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+}
+
+void radeon_object_force_delete(struct radeon_device *rdev)
+{
+ struct radeon_object *robj, *n;
+ struct drm_gem_object *gobj;
+
+ if (list_empty(&rdev->gem.objects)) {
+ return;
+ }
+ DRM_ERROR("Userspace still has active objects !\n");
+ list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+ mutex_lock(&rdev->ddev->struct_mutex);
+ gobj = robj->gobj;
+ DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
+ gobj, robj, (unsigned long)gobj->size,
+ *((unsigned long *)&gobj->refcount));
+ list_del_init(&robj->list);
+ radeon_object_unref(&robj);
+ gobj->driver_private = NULL;
+ drm_gem_object_unreference(gobj);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+ }
+}
+
+int radeon_object_init(struct radeon_device *rdev)
+{
+ return radeon_ttm_init(rdev);
+}
+
+void radeon_object_fini(struct radeon_device *rdev)
+{
+ radeon_ttm_fini(rdev);
+}
+
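+/* Queue a BO on a validation list: objects with a write domain go to
+ * the head of the list, read-only objects to the tail. */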
+void radeon_object_list_add_object(struct radeon_object_list *lobj,
+ struct list_head *head)
+{
+ if (lobj->wdomain) {
+ list_add(&lobj->list, head);
+ } else {
+ list_add_tail(&lobj->list, head);
+ }
+}
+
+int radeon_object_list_reserve(struct list_head *head)
+{
+ struct radeon_object_list *lobj;
+ struct list_head *i;
+ int r;
+
+ list_for_each(i, head) {
+ lobj = list_entry(i, struct radeon_object_list, list);
+ if (!lobj->robj->pin_count) {
+ r = radeon_object_reserve(lobj->robj, true);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to reserve object.\n");
+ return r;
+ }
+ }
+ }
+ return 0;
+}
+
+void radeon_object_list_unreserve(struct list_head *head)
+{
+ struct radeon_object_list *lobj;
+ struct list_head *i;
+
+ list_for_each(i, head) {
+ lobj = list_entry(i, struct radeon_object_list, list);
+ if (!lobj->robj->pin_count) {
+ radeon_object_unreserve(lobj->robj);
+ }
+ }
+}
+
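+/*
+ * Reserve every BO on the list, validate the unpinned ones into a
+ * placement compatible with their read/write domains, record the
+ * resulting GPU offsets and, when @fence is given, attach it as each
+ * BO's new sync object (dropping the old one).
+ */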
+int radeon_object_list_validate(struct list_head *head, void *fence)
+{
+ struct radeon_object_list *lobj;
+ struct radeon_object *robj;
+ struct radeon_fence *old_fence = NULL;
+ struct list_head *i;
+ uint32_t flags;
+ int r;
+
+ r = radeon_object_list_reserve(head);
+ if (unlikely(r != 0)) {
+ radeon_object_list_unreserve(head);
+ return r;
+ }
+ list_for_each(i, head) {
+ lobj = list_entry(i, struct radeon_object_list, list);
+ robj = lobj->robj;
+ if (lobj->wdomain) {
+ flags = radeon_object_flags_from_domain(lobj->wdomain);
+ flags |= TTM_PL_FLAG_TT;
+ } else {
+ flags = radeon_object_flags_from_domain(lobj->rdomain);
+ flags |= TTM_PL_FLAG_TT;
+ flags |= TTM_PL_FLAG_VRAM;
+ }
+ if (!robj->pin_count) {
+ robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
+ r = ttm_buffer_object_validate(&robj->tobj,
+ robj->tobj.proposed_placement,
+ true, false);
+ if (unlikely(r)) {
+ radeon_object_list_unreserve(head);
+ DRM_ERROR("radeon: failed to validate.\n");
+ return r;
+ }
+ radeon_object_gpu_addr(robj);
+ }
+ lobj->gpu_offset = robj->gpu_addr;
+ if (fence) {
+ old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
+ robj->tobj.sync_obj = radeon_fence_ref(fence);
+ robj->tobj.sync_obj_arg = NULL;
+ }
+ if (old_fence) {
+ radeon_fence_unref(&old_fence);
+ }
+ }
+ return 0;
+}
+
+void radeon_object_list_unvalidate(struct list_head *head)
+{
+ struct radeon_object_list *lobj;
+ struct radeon_fence *old_fence = NULL;
+ struct list_head *i;
+
+ list_for_each(i, head) {
+ lobj = list_entry(i, struct radeon_object_list, list);
+ old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
+ lobj->robj->tobj.sync_obj = NULL;
+ if (old_fence) {
+ radeon_fence_unref(&old_fence);
+ }
+ }
+ radeon_object_list_unreserve(head);
+}
+
+void radeon_object_list_clean(struct list_head *head)
+{
+ radeon_object_list_unreserve(head);
+}
+
+int radeon_object_fbdev_mmap(struct radeon_object *robj,
+ struct vm_area_struct *vma)
+{
+ return ttm_fbdev_mmap(vma, &robj->tobj);
+}
+
+unsigned long radeon_object_size(struct radeon_object *robj)
+{
+ return robj->tobj.num_pages << PAGE_SHIFT;
+}
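
For orientation, here is a minimal usage sketch of the API this file introduces: create a small kernel BO in VRAM, pin it, map it for the CPU, then tear everything down. The function below is a hypothetical illustration, not part of this commit; it assumes an already-initialized struct radeon_device.

static int example_scratch_buffer(struct radeon_device *rdev)
{
	struct radeon_object *robj;
	uint64_t gpu_addr;
	void *cpu_ptr;
	int r;

	/* 4 KiB kernel-internal BO, allowed to live in VRAM */
	r = radeon_object_create(rdev, NULL, 4096, true,
				 RADEON_GEM_DOMAIN_VRAM, true, &robj);
	if (r)
		return r;
	/* pin so the GPU address stays stable, then map for the CPU */
	r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r)
		goto out_unref;
	r = radeon_object_kmap(robj, &cpu_ptr);
	if (r)
		goto out_unpin;
	memset(cpu_ptr, 0, 4096);	/* buffer is CPU-visible here */
	radeon_object_kunmap(robj);
out_unpin:
	radeon_object_unpin(robj);
out_unref:
	radeon_object_unref(&robj);
	return r;
}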