Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_evict.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c  137
1 files changed, 74 insertions, 63 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d8ae7d1d0cc6..da05a2692a75 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -30,30 +30,41 @@
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
+#include "i915_trace.h"
static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
- struct list_head *unwind)
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
- list_add(&obj_priv->evict_list, unwind);
- drm_gem_object_reference(&obj_priv->base);
- return drm_mm_scan_add_block(obj_priv->gtt_space);
+ list_add(&obj->exec_list, unwind);
+ drm_gem_object_reference(&obj->base);
+ return drm_mm_scan_add_block(obj->gtt_space);
}
int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret = 0;
i915_gem_retire_requests(dev);
/* Re-check for free space after retiring requests */
- if (drm_mm_search_free(&dev_priv->mm.gtt_space,
- min_size, alignment, 0))
- return 0;
+ if (mappable) {
+ if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0))
+ return 0;
+ } else {
+ if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0))
+ return 0;
+ }
+
+ trace_i915_gem_evict(dev, min_size, alignment, mappable);
/*
* The goal is to evict objects and amalgamate space in LRU order.
@@ -79,45 +90,56 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
*/
INIT_LIST_HEAD(&unwind_list);
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+ if (mappable)
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+ alignment, 0,
+ dev_priv->mm.gtt_mappable_end);
+ else
+ drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- if (mark_free(obj_priv, &unwind_list))
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
- if (obj_priv->base.write_domain || obj_priv->pin_count)
+ if (obj->base.write_domain || obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- if (obj_priv->pin_count)
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
+ if (obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- if (! obj_priv->base.write_domain || obj_priv->pin_count)
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (! obj->base.write_domain || obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Nothing found, clean up and bail out! */
- list_for_each_entry(obj_priv, &unwind_list, evict_list) {
- ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+ while (!list_empty(&unwind_list)) {
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+
+ ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
- drm_gem_object_unreference(&obj_priv->base);
+
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -131,33 +153,34 @@ found:
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
- obj_priv = list_first_entry(&unwind_list,
- struct drm_i915_gem_object,
- evict_list);
- if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
- list_move(&obj_priv->evict_list, &eviction_list);
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ list_move(&obj->exec_list, &eviction_list);
continue;
}
- list_del(&obj_priv->evict_list);
- drm_gem_object_unreference(&obj_priv->base);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
- obj_priv = list_first_entry(&eviction_list,
- struct drm_i915_gem_object,
- evict_list);
+ obj = list_first_entry(&eviction_list,
+ struct drm_i915_gem_object,
+ exec_list);
if (ret == 0)
- ret = i915_gem_object_unbind(&obj_priv->base);
- list_del(&obj_priv->evict_list);
- drm_gem_object_unreference(&obj_priv->base);
+ ret = i915_gem_object_unbind(obj);
+
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
return ret;
}
int
-i915_gem_evict_everything(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
@@ -169,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev)
if (lists_empty)
return -ENOSPC;
+ trace_i915_gem_evict_everything(dev, purgeable_only);
+
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev);
if (ret)
@@ -176,36 +201,22 @@ i915_gem_evict_everything(struct drm_device *dev)
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- ret = i915_gem_evict_inactive(dev);
- if (ret)
- return ret;
-
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- BUG_ON(!lists_empty);
-
- return 0;
+ return i915_gem_evict_inactive(dev, purgeable_only);
}
/** Unbinds all inactive objects. */
int
-i915_gem_evict_inactive(struct drm_device *dev)
+i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
-
- while (!list_empty(&dev_priv->mm.inactive_list)) {
- struct drm_gem_object *obj;
- int ret;
-
- obj = &list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- mm_list)->base;
-
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object: %d\n", ret);
- return ret;
+ struct drm_i915_gem_object *obj, *next;
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list, mm_list) {
+ if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
+ int ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
}
}
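
For context on how the reworked entry point is meant to be driven: when drm_mm cannot find a large enough hole, the caller asks i915_gem_evict_something() to free one of at least min_size bytes at the given alignment, optionally restricted to the mappable aperture, and then retries the search. The sketch below illustrates such a caller; the helper name, the retry loop and the surrounding policy are invented for illustration only -- just the i915_gem_evict_something() signature, the drm_mm search calls and the dev_priv->mm fields are taken from the diff above.

/*
 * Illustrative sketch, not part of the patch above.  Assumes the same
 * driver-internal headers as i915_gem_evict.c (i915_drv.h and friends).
 */
static int
try_bind_with_eviction(struct drm_device *dev,
		       struct drm_i915_gem_object *obj,
		       unsigned alignment, bool mappable)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *node;
	int ret;

	for (;;) {
		/* Look for a free hole, restricted to the CPU-visible
		 * aperture when the binding must be mappable. */
		if (mappable)
			node = drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
							   obj->base.size, alignment,
							   0, dev_priv->mm.gtt_mappable_end,
							   0);
		else
			node = drm_mm_search_free(&dev_priv->mm.gtt_space,
						  obj->base.size, alignment, 0);
		if (node != NULL)
			break;

		/* No hole large enough: evict in LRU order and retry. */
		ret = i915_gem_evict_something(dev, obj->base.size,
					       alignment, mappable);
		if (ret)
			return ret;
	}

	/* ... claim 'node' and bind the object's pages here ... */
	return 0;
}

Note also that drm_mm's scan interface requires every block registered with drm_mm_scan_add_block() to be handed back through drm_mm_scan_remove_block() before the resulting hole may be used, which is why both the bail-out path and the found: path in the patch drain unwind_list before doing anything else.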