From 702adba22433c175e8429a47760f35ca16caf1cd Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Wed, 17 Nov 2010 12:28:29 +0000
Subject: drm/ttm/radeon/nouveau: Kill the bo lock in favour of a bo device fence_lock

The bo lock was used only to protect the bo sync object members, and since
it is a per-bo lock, fencing a buffer list will see a lot of locks and
unlocks. Replace it with a per-device lock that protects the sync object
members on *all* bos. Reading and setting these members will always be
very quick, so the risk of heavy lock contention is microscopic. Note that
waiting for sync objects will always take place outside of this lock.

The bo device fence lock will eventually be replaced with a seqlock /
rcu mechanism so we can determine that a bo is idle under an rcu /
read seqlock.

However this change will allow us to batch fencing and unreserving of
buffers with a minimal amount of locking.

Signed-off-by: Thomas Hellstrom
Reviewed-by: Jerome Glisse
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/radeon/radeon_object.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/radeon/radeon_object.c')

diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1d067743fee0..e939cb6a91cc 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -369,11 +369,11 @@ void radeon_bo_list_fence(struct list_head *head, void *fence)
 
 	list_for_each_entry(lobj, head, list) {
 		bo = lobj->bo;
-		spin_lock(&bo->tbo.lock);
+		spin_lock(&bo->tbo.bdev->fence_lock);
 		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
 		bo->tbo.sync_obj = radeon_fence_ref(fence);
 		bo->tbo.sync_obj_arg = NULL;
-		spin_unlock(&bo->tbo.lock);
+		spin_unlock(&bo->tbo.bdev->fence_lock);
 		if (old_fence) {
 			radeon_fence_unref(&old_fence);
 		}
-- 
cgit
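To make the locking pattern above concrete, here is a minimal standalone C sketch (userspace, pthreads) of what the patch switches to: a single per-device fence lock guards the sync-object pointer of every buffer, the pointer swap under that lock is short, and the old fence reference is dropped outside the lock. All identifiers in the sketch (demo_device, demo_bo, demo_fence_buffers, and so on) are hypothetical stand-ins, not radeon or TTM API.

/* Standalone analogy of the per-device fence_lock pattern (not kernel code). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_fence {                         /* stand-in for struct radeon_fence */
	int refcount;
	unsigned int seq;
};

struct demo_bo {                            /* stand-in for a buffer object */
	struct demo_fence *sync_obj;        /* protected by demo_device.fence_lock */
	struct demo_bo *next;
};

struct demo_device {
	pthread_mutex_t fence_lock;         /* one lock for the sync_obj of *all* bos */
	struct demo_bo *bo_list;
};

static struct demo_fence *demo_fence_ref(struct demo_fence *f)
{
	f->refcount++;
	return f;
}

static void demo_fence_unref(struct demo_fence *f)
{
	if (--f->refcount == 0)
		free(f);
}

/* Attach a new fence to every bo on the list: the pointer swap under the
 * shared lock is brief, and the old reference is dropped outside the lock. */
static void demo_fence_buffers(struct demo_device *dev, struct demo_fence *fence)
{
	struct demo_bo *bo;

	for (bo = dev->bo_list; bo; bo = bo->next) {
		struct demo_fence *old;

		pthread_mutex_lock(&dev->fence_lock);
		old = bo->sync_obj;
		bo->sync_obj = demo_fence_ref(fence);
		pthread_mutex_unlock(&dev->fence_lock);

		if (old)
			demo_fence_unref(old);
	}
}

int main(void)
{
	struct demo_device dev = { .fence_lock = PTHREAD_MUTEX_INITIALIZER };
	struct demo_bo bos[3] = { { 0 } };
	struct demo_fence *fence;
	int i;

	for (i = 0; i < 3; i++) {           /* build a three-entry bo list */
		bos[i].next = dev.bo_list;
		dev.bo_list = &bos[i];
	}

	fence = calloc(1, sizeof(*fence));
	fence->refcount = 1;                /* creator's reference */
	fence->seq = 42;

	demo_fence_buffers(&dev, fence);
	printf("fenced 3 bos with seq %u\n", dev.bo_list->sync_obj->seq);

	demo_fence_unref(fence);            /* drop the creator's reference */
	for (i = 0; i < 3; i++)             /* drop the per-bo references */
		demo_fence_unref(bos[i].sync_obj);

	return 0;
}

Because every bo now shares the same lock, a later change can take it once around the whole loop and fence or unreserve an entire list under a single lock/unlock pair, which is the batching the commit message refers to.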
From 147666fb3b93b8c484f562da33a37f886ddff768 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Wed, 17 Nov 2010 12:38:32 +0000
Subject: drm/radeon: Use the ttm execbuf utilities

Rather than re-implementing them in the Radeon driver, use the execbuf /
cs / pushbuf utilities that come with TTM. This comes with an even greater
benefit now that many spinlocks have been optimized away...

Signed-off-by: Thomas Hellstrom
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/radeon/radeon_object.c | 55 +++-------------------------------
 1 file changed, 4 insertions(+), 51 deletions(-)

(limited to 'drivers/gpu/drm/radeon/radeon_object.c')

diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index e939cb6a91cc..a8594d289bcf 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -293,34 +293,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 			       struct list_head *head)
 {
 	if (lobj->wdomain) {
-		list_add(&lobj->list, head);
+		list_add(&lobj->tv.head, head);
 	} else {
-		list_add_tail(&lobj->list, head);
-	}
-}
-
-int radeon_bo_list_reserve(struct list_head *head)
-{
-	struct radeon_bo_list *lobj;
-	int r;
-
-	list_for_each_entry(lobj, head, list){
-		r = radeon_bo_reserve(lobj->bo, false);
-		if (unlikely(r != 0))
-			return r;
-		lobj->reserved = true;
-	}
-	return 0;
-}
-
-void radeon_bo_list_unreserve(struct list_head *head)
-{
-	struct radeon_bo_list *lobj;
-
-	list_for_each_entry(lobj, head, list) {
-		/* only unreserve object we successfully reserved */
-		if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
-			radeon_bo_unreserve(lobj->bo);
+		list_add_tail(&lobj->tv.head, head);
 	}
 }
 
@@ -331,14 +306,11 @@ int radeon_bo_list_validate(struct list_head *head)
 	u32 domain;
 	int r;
 
-	list_for_each_entry(lobj, head, list) {
-		lobj->reserved = false;
-	}
-	r = radeon_bo_list_reserve(head);
+	r = ttm_eu_reserve_buffers(head);
 	if (unlikely(r != 0)) {
 		return r;
 	}
-	list_for_each_entry(lobj, head, list) {
+	list_for_each_entry(lobj, head, tv.head) {
 		bo = lobj->bo;
 		if (!bo->pin_count) {
 			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
@@ -361,25 +333,6 @@ int radeon_bo_list_validate(struct list_head *head)
 	return 0;
 }
 
-void radeon_bo_list_fence(struct list_head *head, void *fence)
-{
-	struct radeon_bo_list *lobj;
-	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
-
-	list_for_each_entry(lobj, head, list) {
-		bo = lobj->bo;
-		spin_lock(&bo->tbo.bdev->fence_lock);
-		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-		bo->tbo.sync_obj = radeon_fence_ref(fence);
-		bo->tbo.sync_obj_arg = NULL;
-		spin_unlock(&bo->tbo.bdev->fence_lock);
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
-	}
-}
-
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 			 struct vm_area_struct *vma)
 {
-- 
cgit

From 99ee7fac189893c90145a22b86bbcfdc98f69a9c Mon Sep 17 00:00:00 2001
From: Dave Airlie
Date: Tue, 23 Nov 2010 11:47:49 +1000
Subject: drm/radeon: add initial tracepoint support.

This adds bo create and fence seq tracking tracepoints. This is just an
initial set to play around with; we should investigate what other
tracepoints would be useful.

Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/radeon/radeon_object.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'drivers/gpu/drm/radeon/radeon_object.c')

diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a8594d289bcf..8bdf0ba2983a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,6 +34,7 @@
 #include <drm/drmP.h>
 #include "radeon_drm.h"
 #include "radeon.h"
+#include "radeon_trace.h"
 
 
 int radeon_ttm_init(struct radeon_device *rdev);
@@ -137,6 +138,7 @@ retry:
 		list_add_tail(&bo->list, &rdev->gem.objects);
 		mutex_unlock(&bo->rdev->gem.mutex);
 	}
+	trace_radeon_bo_create(bo);
 	return 0;
 }
 
-- 
cgit
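The second patch deletes radeon_bo_list_reserve() and radeon_bo_list_unreserve() because ttm_eu_reserve_buffers() already provides list-wide reservation with back-off, and radeon_bo_list_validate() now walks the list through the embedded ttm_validate_buffer (tv.head), as the hunks above show. The sketch below is a standalone, simplified analogy of that reserve-everything-or-back-off-and-retry idea, not the TTM implementation itself (which has more machinery around reservation sequencing and waiting); demo_val_buf, demo_reserve_buffers and demo_backoff are hypothetical names.

/* Standalone analogy of "reserve the whole list or back off and retry"
 * (a simplified model, not the TTM execbuf-util implementation). */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

struct demo_val_buf {                       /* stand-in for a validation-list entry */
	pthread_mutex_t reserve;            /* stand-in for the bo reservation */
	int reserved;
};

/* Release every reservation taken so far (the "back off" step). */
static void demo_backoff(struct demo_val_buf *bufs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (bufs[i].reserved) {
			pthread_mutex_unlock(&bufs[i].reserve);
			bufs[i].reserved = 0;
		}
	}
}

/* Try to reserve every buffer; if one is contended, drop everything we
 * already hold and start over, so two submitters with overlapping lists
 * cannot deadlock against each other. */
static int demo_reserve_buffers(struct demo_val_buf *bufs, int n)
{
	int i;

retry:
	for (i = 0; i < n; i++) {
		if (pthread_mutex_trylock(&bufs[i].reserve) != 0) {
			demo_backoff(bufs, n);
			sched_yield();      /* give the other submitter a chance */
			goto retry;
		}
		bufs[i].reserved = 1;
	}
	return 0;
}

int main(void)
{
	struct demo_val_buf bufs[4];
	int i;

	for (i = 0; i < 4; i++) {
		pthread_mutex_init(&bufs[i].reserve, NULL);
		bufs[i].reserved = 0;
	}

	if (demo_reserve_buffers(bufs, 4) == 0)
		printf("reserved all 4 buffers\n");

	demo_backoff(bufs, 4);              /* unreserve after submission/fencing */
	return 0;
}

Backing off on contention is what lets the reservations be taken in whatever order the list happens to have, without imposing a global lock ordering; in the driver the same property comes from TTM's reservation primitives rather than from open-coded loops like the ones this patch removes.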