author    Jerome Glisse <jglisse@redhat.com>    2009-06-05 14:42:42 +0200
committer Dave Airlie <airlied@redhat.com>      2009-06-15 12:01:53 +1000
commit    771fe6b912fca54f03e8a72eb63058b582775362 (patch)
tree      58aa5469ba8058c2b564d50807395ad6cd7bd7e4 /drivers/gpu/drm/radeon/radeon_ring.c
parent    ba4e7d973dd09b66912ac4c0856add8b0703a997 (diff)
drm/radeon: introduce kernel modesetting for radeon hardware
Add kernel modesetting support to the radeon driver: use the TTM memory manager to manage memory and DRM/GEM to provide the userspace API. To avoid backward-compatibility issues and to allow a clean design and code base, radeon kernel modesetting uses a different code path than the old radeon/drm driver. When kernel modesetting is enabled, the ioctls of the old radeon/drm driver are considered invalid: an error message is printed to the log and they return failure. KMS-enabled userspace will use the new API to talk to the radeon/drm driver.

The new API provides functions to create/destroy/share/mmap buffer objects, which are then managed by the kernel memory manager (here TTM). To submit commands to the GPU, userspace provides a buffer holding the command stream and, along with this buffer, a list of the buffer objects used by the command stream. The kernel radeon driver then places the buffers in GPU-accessible memory and updates the command stream to reflect the positions of the different buffers. The kernel also performs security checks on the command stream provided by the user: we want to catch and forbid any illegal use of the GPU such as DMA into random system memory or into memory not owned by the process supplying the command stream. This part of the code is still incomplete, which is why we propose this patch as a staging driver addition; future security checking might forbid the current experimental userspace from running.

This code supports the following hardware: R1XX, R2XX, R3XX, R4XX, R5XX (radeons up to the X1950). Work is underway to provide support for R6XX, R7XX and newer hardware (radeons from the HD2XXX to the HD4XXX).

Authors:
    Jerome Glisse <jglisse@redhat.com>
    Dave Airlie <airlied@redhat.com>
    Alex Deucher <alexdeucher@gmail.com>

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
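The submission flow described above rests on the indirect buffer (IB) helpers added below. As a rough kernel-side sketch of that lifecycle, using only the functions defined in radeon_ring.c (example_submit() is a hypothetical illustration modeled on radeon_ib_test(), not part of this patch):

    /* Hypothetical caller, for illustration only: acquire an IB from the
     * pool, fill it with CP packets, schedule it on the ring and wait. */
    static int example_submit(struct radeon_device *rdev)
    {
            struct radeon_ib *ib;
            int r;

            r = radeon_ib_get(rdev, &ib);       /* free IB + fresh fence */
            if (r)
                    return r;
            ib->ptr[0] = PACKET2(0);            /* command stream dwords */
            ib->length_dw = 1;
            r = radeon_ib_schedule(rdev, ib);   /* pad, flush, emit + fence */
            if (!r)
                    r = radeon_fence_wait(ib->fence, false);
            radeon_ib_free(rdev, &ib);          /* give the IB back */
            return r;
    }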
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ring.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c  |  485
1 file changed, 485 insertions, 0 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
new file mode 100644
index 000000000000..a853261d1881
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+int radeon_debugfs_ib_init(struct radeon_device *rdev);
+
+/*
+ * IB.
+ */
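+/*
+ * IBs come from a small fixed pool (RADEON_IB_POOL_SIZE entries): a bitmap
+ * tracks which slots are in use, and submitted IBs sit on the scheduled_ibs
+ * list until their fence signals. When every slot is taken, radeon_ib_get()
+ * reclaims the oldest scheduled IB by waiting on its fence. All of this is
+ * serialized by ib_pool.mutex.
+ */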
+int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
+{
+ struct radeon_fence *fence;
+ struct radeon_ib *nib;
+ unsigned long i;
+ int r = 0;
+
+ *ib = NULL;
+ r = radeon_fence_create(rdev, &fence);
+ if (r) {
+ DRM_ERROR("failed to create fence for new IB\n");
+ return r;
+ }
+ mutex_lock(&rdev->ib_pool.mutex);
+ i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+ if (i < RADEON_IB_POOL_SIZE) {
+ set_bit(i, rdev->ib_pool.alloc_bm);
+ rdev->ib_pool.ibs[i].length_dw = 0;
+ *ib = &rdev->ib_pool.ibs[i];
+ goto out;
+ }
+ if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
+ /* nothing we can do, the pool is empty and nothing is in flight */
+ DRM_ERROR("all IBs allocated, none scheduled.\n");
+ r = -EINVAL;
+ goto out;
+ }
+ /* get the first ib on the scheduled list */
+ nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
+ struct radeon_ib, list);
+ if (nib->fence == NULL) {
+ /* nothing we can do, a scheduled IB should always carry a fence */
+ DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
+ r = -EINVAL;
+ goto out;
+ }
+ r = radeon_fence_wait(nib->fence, false);
+ if (r) {
+ DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
+ (unsigned long)nib->gpu_addr, nib->length_dw);
+ DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
+ goto out;
+ }
+ radeon_fence_unref(&nib->fence);
+ nib->length_dw = 0;
+ list_del(&nib->list);
+ INIT_LIST_HEAD(&nib->list);
+ *ib = nib;
+out:
+ mutex_unlock(&rdev->ib_pool.mutex);
+ if (r) {
+ radeon_fence_unref(&fence);
+ } else {
+ (*ib)->fence = fence;
+ }
+ return r;
+}
+
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+{
+ struct radeon_ib *tmp = *ib;
+
+ *ib = NULL;
+ if (tmp == NULL) {
+ return;
+ }
+ mutex_lock(&rdev->ib_pool.mutex);
+ if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
+ /* IB is scheduled and not yet signaled, don't do anything */
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return;
+ }
+ list_del(&tmp->list);
+ INIT_LIST_HEAD(&tmp->list);
+ if (tmp->fence) {
+ radeon_fence_unref(&tmp->fence);
+ }
+ tmp->length_dw = 0;
+ clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+ mutex_unlock(&rdev->ib_pool.mutex);
+}
+
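+/* Pad the IB with type-2 NOP packets until its length is a multiple of
+ * the CP fetch size (align_mask + 1 dwords). */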
+static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ while ((ib->length_dw & rdev->cp.align_mask)) {
+ ib->ptr[ib->length_dw++] = PACKET2(0);
+ }
+}
+
+static void radeon_ib_cpu_flush(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ unsigned long tmp;
+ unsigned i;
+
+ /* To force a CPU cache flush; ugly, but it seems reliable */
+ for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) {
+ tmp = readl(&ib->ptr[i]);
+ }
+}
+
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ int r = 0;
+
+ mutex_lock(&rdev->ib_pool.mutex);
+ radeon_ib_align(rdev, ib);
+ radeon_ib_cpu_flush(rdev, ib);
+ if (!ib->length_dw || !rdev->cp.ready) {
+ /* TODO: empty IB or CP not ready, we should report this */
+ mutex_unlock(&rdev->ib_pool.mutex);
+ DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+ return -EINVAL;
+ }
+ /* 64 dwords should be enough for the fence too */
+ r = radeon_ring_lock(rdev, 64);
+ if (r) {
+ DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return r;
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
+ radeon_ring_write(rdev, ib->gpu_addr);
+ radeon_ring_write(rdev, ib->length_dw);
+ radeon_fence_emit(rdev, ib->fence);
+ radeon_ring_unlock_commit(rdev);
+ list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return 0;
+}
+
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+ void *ptr;
+ uint64_t gpu_addr;
+ int i;
+ int r = 0;
+
+ /* Allocate the 1M pool object; each IB gets a 64K slice of it */
+ INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
+ r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+ true, RADEON_GEM_DOMAIN_GTT,
+ false, &rdev->ib_pool.robj);
+ if (r) {
+ DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
+ return r;
+ }
+ r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+ if (r) {
+ DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
+ return r;
+ }
+ r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+ if (r) {
+ DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ unsigned offset;
+
+ offset = i * 64 * 1024;
+ rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
+ rdev->ib_pool.ibs[i].ptr = ptr + offset;
+ rdev->ib_pool.ibs[i].idx = i;
+ rdev->ib_pool.ibs[i].length_dw = 0;
+ INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+ }
+ bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+ rdev->ib_pool.ready = true;
+ DRM_INFO("radeon: ib pool ready.\n");
+ if (radeon_debugfs_ib_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for IB !\n");
+ }
+ return r;
+}
+
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+ if (!rdev->ib_pool.ready) {
+ return;
+ }
+ mutex_lock(&rdev->ib_pool.mutex);
+ bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+ if (rdev->ib_pool.robj) {
+ radeon_object_kunmap(rdev->ib_pool.robj);
+ radeon_object_unref(&rdev->ib_pool.robj);
+ rdev->ib_pool.robj = NULL;
+ }
+ mutex_unlock(&rdev->ib_pool.mutex);
+}
+
+int radeon_ib_test(struct radeon_device *rdev)
+{
+ struct radeon_ib *ib;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = radeon_scratch_get(rdev, &scratch);
+ if (r) {
+ DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = radeon_ib_get(rdev, &ib);
+ if (r) {
+ return r;
+ }
+ ib->ptr[0] = PACKET0(scratch, 0);
+ ib->ptr[1] = 0xDEADBEEF;
+ ib->ptr[2] = PACKET2(0);
+ ib->ptr[3] = PACKET2(0);
+ ib->ptr[4] = PACKET2(0);
+ ib->ptr[5] = PACKET2(0);
+ ib->ptr[6] = PACKET2(0);
+ ib->ptr[7] = PACKET2(0);
+ ib->length_dw = 8;
+ r = radeon_ib_schedule(rdev, ib);
+ if (r) {
+ radeon_scratch_free(rdev, scratch);
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
+ r = radeon_fence_wait(ib->fence, false);
+ if (r) {
+ /* don't leak the scratch register or the IB on failure */
+ radeon_scratch_free(rdev, scratch);
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ break;
+ }
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test succeeded in %u usecs\n", i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+ radeon_scratch_free(rdev, scratch);
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+
+/*
+ * Ring.
+ */
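+/*
+ * The CP ring is a circular buffer of dwords: the driver advances wptr as
+ * it writes commands and the GPU advances rptr as it fetches them. Because
+ * the ring size is a power of two, masking with ptr_mask (size in dwords
+ * minus one) handles the wrap-around.
+ */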
+void radeon_ring_free_size(struct radeon_device *rdev)
+{
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ /* This works because ring_size is a power of 2 */
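+ /* Example: in a 16 dword ring (ptr_mask = 15) with rptr = 2 and
+ * wptr = 10, free = (2 + 16 - 10) & 15 = 8 dwords. A result of 0
+ * means rptr == wptr, i.e. the ring is idle, so all of it is free:
+ * hence the fixup below. */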
+ rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
+ rdev->cp.ring_free_dw -= rdev->cp.wptr;
+ rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
+ if (!rdev->cp.ring_free_dw) {
+ rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+ }
+}
+
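+/* Reserve ndw dwords in the ring, waiting on fences until enough space is
+ * free. On success the ring mutex is held until radeon_ring_unlock_commit()
+ * or radeon_ring_unlock_undo() is called. */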
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+ int r;
+
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
+ mutex_lock(&rdev->cp.mutex);
+ while (ndw > (rdev->cp.ring_free_dw - 1)) {
+ radeon_ring_free_size(rdev);
+ if (ndw < rdev->cp.ring_free_dw) {
+ break;
+ }
+ r = radeon_fence_wait_next(rdev);
+ if (r) {
+ mutex_unlock(&rdev->cp.mutex);
+ return r;
+ }
+ }
+ rdev->cp.count_dw = ndw;
+ rdev->cp.wptr_old = rdev->cp.wptr;
+ return 0;
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+ unsigned count_dw_pad;
+ unsigned i;
+
+ /* We pad to match fetch size */
+ count_dw_pad = (rdev->cp.align_mask + 1) -
+ (rdev->cp.wptr & rdev->cp.align_mask);
+ for (i = 0; i < count_dw_pad; i++) {
+ radeon_ring_write(rdev, PACKET2(0));
+ }
+ DRM_MEMORYBARRIER();
+ WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+ (void)RREG32(RADEON_CP_RB_WPTR);
+ mutex_unlock(&rdev->cp.mutex);
+}
+
+void radeon_ring_unlock_undo(struct radeon_device *rdev)
+{
+ rdev->cp.wptr = rdev->cp.wptr_old;
+ mutex_unlock(&rdev->cp.mutex);
+}
+
+int radeon_ring_test(struct radeon_device *rdev)
+{
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = radeon_scratch_get(rdev, &scratch);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = radeon_ring_lock(rdev, 2);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ radeon_scratch_free(rdev, scratch);
+ return r;
+ }
+ radeon_ring_write(rdev, PACKET0(scratch, 0));
+ radeon_ring_write(rdev, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ break;
+ }
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test succeeded in %d usecs\n", i);
+ } else {
+ DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+ radeon_scratch_free(rdev, scratch);
+ return r;
+}
+
+int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+ int r;
+
+ rdev->cp.ring_size = ring_size;
+ /* Allocate ring buffer */
+ /* cp.mutex is not held here, so error paths must not unlock it */
+ if (rdev->cp.ring_obj == NULL) {
+ r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
+ true,
+ RADEON_GEM_DOMAIN_GTT,
+ false,
+ &rdev->cp.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_object_pin(rdev->cp.ring_obj,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.gpu_addr);
+ if (r) {
+ DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_object_kmap(rdev->cp.ring_obj,
+ (void **)&rdev->cp.ring);
+ if (r) {
+ DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
+ return r;
+ }
+ }
+ rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
+ rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+ return 0;
+}
+
+void radeon_ring_fini(struct radeon_device *rdev)
+{
+ mutex_lock(&rdev->cp.mutex);
+ if (rdev->cp.ring_obj) {
+ radeon_object_kunmap(rdev->cp.ring_obj);
+ radeon_object_unpin(rdev->cp.ring_obj);
+ radeon_object_unref(&rdev->cp.ring_obj);
+ rdev->cp.ring = NULL;
+ rdev->cp.ring_obj = NULL;
+ }
+ mutex_unlock(&rdev->cp.mutex);
+}
+
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct radeon_ib *ib = node->info_ent->data;
+ unsigned i;
+
+ if (ib == NULL) {
+ return 0;
+ }
+ seq_printf(m, "IB %04lu\n", ib->idx);
+ seq_printf(m, "IB fence %p\n", ib->fence);
+ seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+ for (i = 0; i < ib->length_dw; i++) {
+ seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+ }
+ return 0;
+}
+
+static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
+static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+#endif
+
+int radeon_debugfs_ib_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
+
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
+ radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
+ radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
+ radeon_debugfs_ib_list[i].driver_features = 0;
+ radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
+ }
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
+ RADEON_IB_POOL_SIZE);
+#else
+ return 0;
+#endif
+}