author	Daniel Vetter <daniel.vetter@ffwll.ch>	2015-10-15 11:33:43 +0200
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2015-10-19 11:00:44 +0200
commit	2225cfe46bcc7558d9e371d1bc117df2df1fbacd (patch)
tree	98a7a3dddd15dc3db44969155dd592b7254603ae /drivers/gpu/drm/drm_gem.c
parent	3d57b42cabc8472ab63f0adc9529102314218f1e (diff)
drm/gem: Use kref_get_unless_zero for the weak mmap references
Compared to wrapping the final kref_put with dev->struct_mutex this allows us to only acquire the offset manager lock both in the final cleanup and in the lookup, which has the upside that no locks leak out of the core abstractions. But it means that we need to hold a temporary reference to the object while checking mmap constraints, to make sure the object doesn't disappear. Extending the critical region would have worked too, but would result in more leaky locking.

Also, this is the final bit which required dev->struct_mutex in gem core; now modern drivers can be completely struct_mutex free!

This needs a new drm_vma_offset_exact_lookup_locked and makes both drm_vma_offset_exact_lookup and drm_vma_offset_lookup unused.

v2: Don't leak object references in failure paths (David).

v3: Add a comment from Chris explaining how the ordering works, with the slight adjustment that I dropped any mention of struct_mutex since with this patch it's now immaterial to core gem.

Cc: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://mid.gmane.org/1444901623-18918-1-git-send-email-daniel.vetter@ffwll.ch
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
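The core of the change is the weak-reference lookup idiom: the lookup structure and the object's final teardown are serialized by the same lock, so a refcount that has already hit zero can be detected and rejected with kref_get_unless_zero() instead of being re-acquired. Below is a minimal sketch of that idiom, not code from this patch; my_obj, my_table, my_table_lock, my_lookup_get and my_release are illustrative names only.

/*
 * Illustrative only: a lookup table and the objects' final teardown are
 * serialized by the same lock, so a zero refcount observed under that lock
 * means "already being destroyed" and must be treated as a miss.
 */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	struct kref refcount;
	unsigned long key;
	struct list_head link;
};

static LIST_HEAD(my_table);
static DEFINE_SPINLOCK(my_table_lock);

static struct my_obj *my_find_locked(unsigned long key)
{
	struct my_obj *obj;

	list_for_each_entry(obj, &my_table, link)
		if (obj->key == key)
			return obj;
	return NULL;
}

/* Weak lookup: returns a strong reference on success, NULL otherwise. */
static struct my_obj *my_lookup_get(unsigned long key)
{
	struct my_obj *obj;

	spin_lock(&my_table_lock);
	obj = my_find_locked(key);
	/* An object past its final kref_put() is already being torn down. */
	if (obj && !kref_get_unless_zero(&obj->refcount))
		obj = NULL;
	spin_unlock(&my_table_lock);
	return obj;
}

/* Release callback for kref_put(&obj->refcount, my_release). */
static void my_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, refcount);

	/* Teardown takes the same lock before the memory can go away. */
	spin_lock(&my_table_lock);
	list_del(&obj->link);
	spin_unlock(&my_table_lock);
	kfree(obj);
}

Because my_release() must take my_table_lock before freeing, a concurrent my_lookup_get() either misses the object entirely or sees it with a zero refcount and returns NULL; it can never hand out a pointer to freed memory. The patch applies the same reasoning with mgr->vm_lock in place of my_table_lock.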
Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--	drivers/gpu/drm/drm_gem.c	40
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ab8ea42264f4..64353d40db53 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -862,30 +862,46 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
-	struct drm_gem_object *obj;
+	struct drm_gem_object *obj = NULL;
 	struct drm_vma_offset_node *node;
 	int ret;
 
 	if (drm_device_is_unplugged(dev))
 		return -ENODEV;
 
-	mutex_lock(&dev->struct_mutex);
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (likely(node)) {
+		obj = container_of(node, struct drm_gem_object, vma_node);
+		/*
+		 * When the object is being freed, after it hits 0-refcnt it
+		 * proceeds to tear down the object. In the process it will
+		 * attempt to remove the VMA offset and so acquire this
+		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
+		 * that matches our range, we know it is in the process of being
+		 * destroyed and will be freed as soon as we release the lock -
+		 * so we have to check for the 0-refcnted object and treat it as
+		 * invalid.
+		 */
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
+	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
 
-	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
-					   vma->vm_pgoff,
-					   vma_pages(vma));
-	if (!node) {
-		mutex_unlock(&dev->struct_mutex);
+	if (!obj)
 		return -EINVAL;
-	} else if (!drm_vma_node_is_allowed(node, filp)) {
-		mutex_unlock(&dev->struct_mutex);
+
+	if (!drm_vma_node_is_allowed(node, filp)) {
+		drm_gem_object_unreference_unlocked(obj);
 		return -EACCES;
 	}
 
-	obj = container_of(node, struct drm_gem_object, vma_node);
-	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
+	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
+			       vma);
 
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 
 	return ret;
 }
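For context, here is a hedged sketch of where the mgr->vm_lock ordering described in the comment above comes from on the teardown side: once the last reference is dropped, the driver's free callback removes the mmap offset node via drm_gem_free_mmap_offset(), which takes the same vm_lock the lookup path holds, before the object memory is released. my_driver_gem_free_object() and to_my_bo() are hypothetical driver helpers, not part of this patch.

/*
 * Illustrative, simplified driver free callback (assumed, not from the
 * patch): the offset node is removed under mgr->vm_lock before the object
 * is freed, so a racing drm_gem_mmap() either misses the node or sees a
 * zero refcount and bails out.
 */
static void my_driver_gem_free_object(struct drm_gem_object *obj)
{
	/* Removes the node from the offset manager under mgr->vm_lock. */
	drm_gem_free_mmap_offset(obj);

	/* Releases common GEM bookkeeping for the object. */
	drm_gem_object_release(obj);

	kfree(to_my_bo(obj));	/* to_my_bo() is a hypothetical wrapper */
}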