author    Jerome Glisse <jglisse@redhat.com>    2011-11-15 11:48:34 -0500
committer Dave Airlie <airlied@redhat.com>      2011-12-20 19:52:12 +0000
commit    b15ba51207e54245409d6f46e20dab36f906eed1 (patch)
tree      b34a37395efa5be0a9af4ba5b627e43813713ed3 /drivers/gpu/drm/radeon/radeon_ring.c
parent    1b37078b7ddf35cab12ac6544187e3636d50c0dc (diff)
drm/radeon: introduce a sub allocator and convert ib pool to it v4
Somewhat specialized sub-allocator designed to perform sub-allocation for command buffers, not only for the current cs ioctl but for future command submission ioctls as well. The patch also converts the current ib pool to use the sub-allocator. The idea is that the ib pool buffer can be shared with other command buffer submissions that do not have 64K granularity.

v2: harmonize pool handling and add suspend/resume callbacks to pin/unpin the sa bo (tested on rv280, rv370, r420, rv515, rv610, rv710, redwood, cayman, rs480, rs690, rs880)
v3: simplify the allocator
v4: fix the radeon_ib_get error path to properly free the fence

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
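For orientation, here is a minimal, self-contained sketch of the sub-allocation idea the commit describes: one large GTT buffer is carved into aligned sub-blocks by offset. All names below (sa_manager, sa_alloc, POOL_SIZE) are illustrative stand-ins, not the driver's radeon_sa API; the real allocator also tracks in-flight blocks so they can be reclaimed once their fences signal.

/* Illustrative sub-allocator sketch: hand out aligned offsets from one
 * large buffer. Stand-in names only; the real code lives in radeon_sa.c. */
#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE (16 * 64 * 1024)  /* RADEON_IB_POOL_SIZE * 64K, as in the patch */

struct sa_manager {
	uint32_t head;  /* next free offset inside the big buffer */
};

/* Carve 'size' bytes out of the pool, aligned to 'align' (a power of two).
 * Returns the offset, or -1 when the pool is exhausted. */
static int64_t sa_alloc(struct sa_manager *sa, uint32_t size, uint32_t align)
{
	uint32_t offset = (sa->head + align - 1) & ~(align - 1);

	if (offset + size > POOL_SIZE)
		return -1;
	sa->head = offset + size;
	return offset;
}

int main(void)
{
	struct sa_manager sa = { .head = 0 };

	/* Two 64K command buffers, 64-byte aligned, as radeon_ib_get() requests. */
	printf("ib0 at offset %lld\n", (long long)sa_alloc(&sa, 64 * 1024, 64));
	printf("ib1 at offset %lld\n", (long long)sa_alloc(&sa, 64 * 1024, 64));
	return 0;
}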
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ring.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 254
1 file changed, 99 insertions(+), 155 deletions(-)
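The reworked radeon_ib_get() in the diff below drops the per-slot free flag in favor of fence-driven reclaim: walk the pool from the oldest slot, free anything whose fence has signaled, and retry (blocking on an unsignaled fence between passes) up to five times before giving up. A hedged sketch of that loop, with stand-in types rather than the driver's fence API:

/* Stand-in sketch of the retry/reclaim pattern in radeon_ib_get().
 * 'struct slot' and its flags are illustrative; the driver checks
 * radeon_fence_signaled() and frees the sa_bo instead. */
#include <stdbool.h>

#define N_SLOTS   16
#define MAX_RETRY 5

struct slot {
	bool in_use;
	bool fence_signaled;  /* set elsewhere once the GPU passes the fence */
};

static bool slot_try_free(struct slot *s)
{
	if (s->in_use && s->fence_signaled)
		s->in_use = false;  /* reclaim: fence signaled, block is done */
	return !s->in_use;
}

/* Returns a slot index, or -1 after MAX_RETRY passes over the pool
 * (the real code also waits on one unsignaled fence between passes). */
int ib_get(struct slot pool[], unsigned *head)
{
	for (unsigned retry = 0; retry < MAX_RETRY; retry++) {
		unsigned idx = *head;
		for (unsigned i = 0; i < N_SLOTS; i++) {
			if (slot_try_free(&pool[idx])) {
				pool[idx].in_use = true;
				*head = (idx + 1) % N_SLOTS;  /* head stays the oldest ib */
				return (int)idx;
			}
			idx = (idx + 1) % N_SLOTS;
		}
	}
	return -1;  /* -ENOMEM in the driver */
}

int main(void)
{
	struct slot pool[N_SLOTS] = { 0 };
	unsigned head = 0;

	return ib_get(pool, &head) < 0;  /* all slots start free, so this succeeds */
}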
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 133e2636cea0..f6a4fbd102a0 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -74,92 +74,90 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
ring->ring_free_dw--;
}
-void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
-{
- struct radeon_ib *ib, *n;
-
- list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
- list_del(&ib->list);
- vfree(ib->ptr);
- kfree(ib);
- }
-}
-
-void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+/*
+ * IB.
+ */
+static bool radeon_ib_try_free(struct radeon_device *rdev,
+ struct radeon_ib *ib)
{
- struct radeon_ib *bib;
-
- bib = kmalloc(sizeof(*bib), GFP_KERNEL);
- if (bib == NULL)
- return;
- bib->ptr = vmalloc(ib->length_dw * 4);
- if (bib->ptr == NULL) {
- kfree(bib);
- return;
+ bool done = false;
+
+ /* only free ibs which have been emitted */
+ if (ib->fence && ib->fence->emitted) {
+ if (radeon_fence_signaled(ib->fence)) {
+ radeon_fence_unref(&ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo);
+ done = true;
+ }
}
- memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
- bib->length_dw = ib->length_dw;
- mutex_lock(&rdev->ib_pool.mutex);
- list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
- mutex_unlock(&rdev->ib_pool.mutex);
+ return done;
}
-/*
- * IB.
- */
int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
{
struct radeon_fence *fence;
- struct radeon_ib *nib;
- int r = 0, i, c;
+ unsigned cretry = 0;
+ int r = 0, i, idx;
*ib = NULL;
+
r = radeon_fence_create(rdev, &fence, ring);
if (r) {
dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
+
mutex_lock(&rdev->ib_pool.mutex);
- for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
- i &= (RADEON_IB_POOL_SIZE - 1);
- if (rdev->ib_pool.ibs[i].free) {
- nib = &rdev->ib_pool.ibs[i];
- break;
- }
- }
- if (nib == NULL) {
- /* This should never happen, it means we allocated all
- * IB and haven't scheduled one yet, return EBUSY to
- * userspace hoping that on ioctl recall we get better
- * luck
- */
- dev_err(rdev->dev, "no free indirect buffer !\n");
+ idx = rdev->ib_pool.head_id;
+retry:
+ if (cretry > 5) {
+ dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
- return -EBUSY;
+ return -ENOMEM;
}
- rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
- nib->free = false;
- if (nib->fence) {
- mutex_unlock(&rdev->ib_pool.mutex);
- r = radeon_fence_wait(nib->fence, false);
- if (r) {
- dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
- nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
- mutex_lock(&rdev->ib_pool.mutex);
- nib->free = true;
- mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return r;
+ cretry++;
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
+ if (rdev->ib_pool.ibs[idx].fence == NULL) {
+ r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
+ &rdev->ib_pool.ibs[idx].sa_bo,
+ 64*1024, 64);
+ if (!r) {
+ *ib = &rdev->ib_pool.ibs[idx];
+ (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+ (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
+ (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+ (*ib)->gpu_addr += (*ib)->sa_bo.offset;
+ (*ib)->fence = fence;
+ /* ibs are most likely to be allocated in a ring fashion,
+ * so rdev->ib_pool.head_id should be the id of the
+ * oldest ib
+ */
+ rdev->ib_pool.head_id = (1 + idx);
+ rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return 0;
+ }
}
- mutex_lock(&rdev->ib_pool.mutex);
+ idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ }
+ /* this should be a rare event, i.e. all ibs scheduled, none signaled yet.
+ */
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ if (rdev->ib_pool.ibs[idx].fence) {
+ r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
+ if (!r) {
+ goto retry;
+ }
+ /* an error happened */
+ break;
+ }
+ idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
}
- radeon_fence_unref(&nib->fence);
- nib->fence = fence;
- nib->length_dw = 0;
mutex_unlock(&rdev->ib_pool.mutex);
- *ib = nib;
- return 0;
+ radeon_fence_unref(&fence);
+ return r;
}
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -170,10 +168,11 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
- if (!tmp->fence->emitted)
- radeon_fence_unref(&tmp->fence);
mutex_lock(&rdev->ib_pool.mutex);
- tmp->free = true;
+ if (tmp->fence && !tmp->fence->emitted) {
+ radeon_sa_bo_free(rdev, &tmp->sa_bo);
+ radeon_fence_unref(&tmp->fence);
+ }
mutex_unlock(&rdev->ib_pool.mutex);
}
@@ -196,94 +195,73 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
}
radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
radeon_fence_emit(rdev, ib->fence);
- mutex_lock(&rdev->ib_pool.mutex);
- /* once scheduled IB is considered free and protected by the fence */
- ib->free = true;
- mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev, ring);
return 0;
}
int radeon_ib_pool_init(struct radeon_device *rdev)
{
- void *ptr;
- uint64_t gpu_addr;
- int i;
- int r = 0;
+ int i, r;
- if (rdev->ib_pool.robj)
+ mutex_lock(&rdev->ib_pool.mutex);
+ if (rdev->ib_pool.ready) {
+ mutex_unlock(&rdev->ib_pool.mutex);
return 0;
- INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
- /* Allocate 1M object buffer */
- r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
- PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
- &rdev->ib_pool.robj);
- if (r) {
- DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
- return r;
}
- r = radeon_bo_reserve(rdev->ib_pool.robj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->ib_pool.robj);
- DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
- return r;
- }
- r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
- radeon_bo_unreserve(rdev->ib_pool.robj);
+
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GEM_DOMAIN_GTT);
if (r) {
- DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
+ mutex_unlock(&rdev->ib_pool.mutex);
return r;
}
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- unsigned offset;
- offset = i * 64 * 1024;
- rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
- rdev->ib_pool.ibs[i].ptr = ptr + offset;
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ rdev->ib_pool.ibs[i].fence = NULL;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
- rdev->ib_pool.ibs[i].free = true;
+ INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
}
rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
+
if (radeon_debugfs_ib_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for IB !\n");
}
if (radeon_debugfs_ring_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
- return r;
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return 0;
}
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
- int r;
- struct radeon_bo *robj;
+ unsigned i;
- if (!rdev->ib_pool.ready) {
- return;
- }
mutex_lock(&rdev->ib_pool.mutex);
- radeon_ib_bogus_cleanup(rdev);
- robj = rdev->ib_pool.robj;
- rdev->ib_pool.robj = NULL;
- mutex_unlock(&rdev->ib_pool.mutex);
-
- if (robj) {
- r = radeon_bo_reserve(robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(robj);
- radeon_bo_unpin(robj);
- radeon_bo_unreserve(robj);
+ if (rdev->ib_pool.ready) {
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
+ radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
}
- radeon_bo_unref(&robj);
+ radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
+ rdev->ib_pool.ready = false;
}
+ mutex_unlock(&rdev->ib_pool.mutex);
}
+int radeon_ib_pool_start(struct radeon_device *rdev)
+{
+ return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+}
+
+int radeon_ib_pool_suspend(struct radeon_device *rdev)
+{
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+}
/*
* Ring.
@@ -509,37 +487,8 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
return 0;
}
-static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct radeon_device *rdev = node->info_ent->data;
- struct radeon_ib *ib;
- unsigned i;
-
- mutex_lock(&rdev->ib_pool.mutex);
- if (list_empty(&rdev->ib_pool.bogus_ib)) {
- mutex_unlock(&rdev->ib_pool.mutex);
- seq_printf(m, "no bogus IB recorded\n");
- return 0;
- }
- ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
- list_del_init(&ib->list);
- mutex_unlock(&rdev->ib_pool.mutex);
- seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
- for (i = 0; i < ib->length_dw; i++) {
- seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
- }
- vfree(ib->ptr);
- kfree(ib);
- return 0;
-}
-
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
-
-static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
- {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
-};
#endif
int radeon_debugfs_ring_init(struct radeon_device *rdev)
@@ -556,12 +505,7 @@ int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
- int r;
- radeon_debugfs_ib_bogus_info_list[0].data = rdev;
- r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
- if (r)
- return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];