author    Dave Airlie <airlied@redhat.com>    2010-10-19 09:48:34 +1000
committer Dave Airlie <airlied@redhat.com>    2010-10-19 09:48:34 +1000
commit    b7ae5056c94a8191c1fd0b5697707377516c0c5d (patch)
tree      394f68003ea0bc2a0c24e510a7c5b13f479ce743 /drivers/gpu/drm/ttm
parent    2126d0a4a205e2d6b763434f892524cd60f74228 (diff)
parent    6a2a11dbea5db417d200d38dda53c30a2e5603e0 (diff)
Merge branch 'drm-fixes' of /home/airlied/kernel/linux-2.6 into drm-core-next

Conflicts:
        drivers/gpu/drm/i915/intel_fb.c
        drivers/gpu/drm/radeon/r600_blit_kms.c
        drivers/gpu/drm/ttm/ttm_bo.c
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  |  79
1 file changed, 70 insertions(+), 9 deletions(-)
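The heart of the merged fix is the handshake around bo::reserved: ttm_bo_cleanup_refs() now try-reserves the object and, on -EBUSY, backs off and requeues instead of hitting the old BUG_ON(), and every path that drops the reservation does atomic_set(&bo->reserved, 0) followed by wake_up_all(&bo->event_queue). The standalone sketch below models that tryreserve/unreserve/wait-unreserved pattern in userspace with pthreads; it is an illustration only, and the fake_bo type and fake_bo_* helpers are invented names that do not exist in TTM.

/* Userspace model of the bo::reserved handshake (illustration only).
 * A mutex stands in for the lru_lock that serializes the flag, and a
 * condition variable stands in for bo->event_queue.
 * Build with: gcc -pthread fake_bo.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct fake_bo {
        pthread_mutex_t lock;           /* stands in for glob->lru_lock */
        pthread_cond_t event_queue;     /* stands in for bo->event_queue */
        bool reserved;                  /* stands in for atomic bo->reserved */
};

/* Fail with -EBUSY instead of blocking, like ttm_bo_reserve_locked()
 * does in the no-wait case. */
static int fake_bo_tryreserve(struct fake_bo *bo)
{
        int ret = 0;

        pthread_mutex_lock(&bo->lock);
        if (bo->reserved)
                ret = -EBUSY;
        else
                bo->reserved = true;
        pthread_mutex_unlock(&bo->lock);
        return ret;
}

/* Drop the reservation and wake every waiter, mirroring
 * atomic_set(&bo->reserved, 0) + wake_up_all(&bo->event_queue). */
static void fake_bo_unreserve(struct fake_bo *bo)
{
        pthread_mutex_lock(&bo->lock);
        bo->reserved = false;
        pthread_cond_broadcast(&bo->event_queue);
        pthread_mutex_unlock(&bo->lock);
}

/* Block until the reservation is free, like a waiter on event_queue. */
static void fake_bo_wait_unreserved(struct fake_bo *bo)
{
        pthread_mutex_lock(&bo->lock);
        while (bo->reserved)
                pthread_cond_wait(&bo->event_queue, &bo->lock);
        pthread_mutex_unlock(&bo->lock);
}

int main(void)
{
        struct fake_bo bo = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .event_queue = PTHREAD_COND_INITIALIZER,
                .reserved = false,
        };

        if (fake_bo_tryreserve(&bo) == 0)       /* first attempt succeeds */
                fake_bo_unreserve(&bo);         /* waiters would wake here */
        fake_bo_wait_unreserved(&bo);           /* returns immediately now */
        return 0;
}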
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index af7b57a47fbc..1e9bb2156dcf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -439,6 +439,42 @@ out_err:
}
/**
+ * Call with bo::reserved and the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver-specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+        struct ttm_bo_global *glob = bo->glob;
+
+        if (bo->ttm) {
+
+                /**
+                 * Release the lru_lock, since we don't want to have
+                 * an atomic requirement on ttm_tt[unbind|destroy].
+                 */
+
+                spin_unlock(&glob->lru_lock);
+                ttm_tt_unbind(bo->ttm);
+                ttm_tt_destroy(bo->ttm);
+                bo->ttm = NULL;
+                spin_lock(&glob->lru_lock);
+        }
+
+        if (bo->mem.mm_node) {
+                ttm_bo_mem_put(bo, &bo->mem);
+        }
+
+        atomic_set(&bo->reserved, 0);
+        wake_up_all(&bo->event_queue);
+        spin_unlock(&glob->lru_lock);
+}
+
+
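/*
 * Sketch of the calling contract for ttm_bo_cleanup_memtype_use(),
 * inferred from the function above (this annotation is not part of the
 * patch): enter with glob->lru_lock held and bo::reserved owned by the
 * caller; the helper drops lru_lock around ttm_tt_unbind()/
 * ttm_tt_destroy() since those may sleep, and it exits with both the
 * reservation and the lru_lock released. A caller therefore must not
 * unlock or unreserve again:
 *
 *        spin_lock(&glob->lru_lock);
 *        ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
 *        ...
 *        ttm_bo_cleanup_memtype_use(bo);
 *        (lru_lock and bo::reserved are both released at this point)
 */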
+/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
@@ -453,6 +489,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
        int ret;

        spin_lock(&bo->lock);
+retry:
        (void) ttm_bo_wait(bo, false, false, !remove_all);

        if (!bo->sync_obj) {
@@ -461,28 +498,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                spin_unlock(&bo->lock);
                spin_lock(&glob->lru_lock);
-                put_count = ttm_bo_del_from_lru(bo);
+                ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
+
+                /**
+                 * Someone else has the object reserved. Bail and retry.
+                 */
+
+                if (unlikely(ret == -EBUSY)) {
+                        spin_unlock(&glob->lru_lock);
+                        spin_lock(&bo->lock);
+                        goto requeue;
+                }
+
+                /**
+                 * We can re-check for sync object without taking
+                 * the bo::lock since setting the sync object requires
+                 * also bo::reserved. A busy object at this point may
+                 * be caused by another thread starting an accelerated
+                 * eviction.
+                 */
+
+                if (unlikely(bo->sync_obj)) {
+                        atomic_set(&bo->reserved, 0);
+                        wake_up_all(&bo->event_queue);
+                        spin_unlock(&glob->lru_lock);
+                        spin_lock(&bo->lock);
+                        if (remove_all)
+                                goto retry;
+                        else
+                                goto requeue;
+                }
-                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
-                BUG_ON(ret);
-                if (bo->ttm)
-                        ttm_tt_unbind(bo->ttm);
+                put_count = ttm_bo_del_from_lru(bo);
                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
                        ++put_count;
                }
-                spin_unlock(&glob->lru_lock);
-                ttm_bo_mem_put(bo, &bo->mem);
-                atomic_set(&bo->reserved, 0);
+                ttm_bo_cleanup_memtype_use(bo);
                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
                return 0;
        }
-
+requeue:
        spin_lock(&glob->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
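Taken together, the hunks rework ttm_bo_cleanup_refs() from "reserve must succeed" into a tryreserve-with-fallback scheme. Below is a condensed restatement of the new control flow, assembled from the hunks above purely as a reading aid; it is pseudocode, not text from the commit, and the requeue tail (truncated above) is paraphrased rather than quoted.

        spin_lock(&bo->lock);
retry:
        ttm_bo_wait(bo, ...);                   /* try to idle the bo */
        if (!bo->sync_obj) {                    /* idle: try to tear it down */
                spin_unlock(&bo->lock);
                spin_lock(&glob->lru_lock);
                if (ttm_bo_reserve_locked(...) == -EBUSY)
                        goto requeue;           /* reserved elsewhere: back off */
                if (bo->sync_obj) {             /* busy again, e.g. an accelerated eviction */
                        /* unreserve + wake, then retry if remove_all, else requeue */
                }
                /* remove from the lru and ddestroy lists, then: */
                ttm_bo_cleanup_memtype_use(bo); /* releases reservation + lru_lock */
                return 0;                       /* fully destroyed */
        }
requeue:
        /* leave or put the bo on the delayed-destroy list for a later pass */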