summaryrefslogtreecommitdiff
path: root/drivers/gpu/drm/xe/xe_device.h
diff options
context:
space:
mode:
authorMaarten Lankhorst <maarten.lankhorst@linux.intel.com>2023-02-28 11:17:30 +0100
committerRodrigo Vivi <rodrigo.vivi@intel.com>2023-12-19 18:30:20 -0500
commit38c04b47cec861cf4007b3e53cbf584e494e2762 (patch)
treedca5894d0e97888aa1f04c25d3d4ff3a63c2756b /drivers/gpu/drm/xe/xe_device.h
parent044f0cfb19473cd1b60a69c802cac0651066fa21 (diff)
drm/xe: Use atomic instead of mutex for xe_device_mem_access_ongoing
xe_guc_ct_fast_path() is called from an irq context, and cannot lock the mutex used by xe_device_mem_access_ongoing(). Fortunately it is easy to fix, and the atomic guarantees are good enough to ensure xe->mem_access.hold_rpm is set before last ref is dropped. As far as I can tell, the runtime ref in device access should be killable, but don't dare to do it yet. Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Reviewed-by: Matthew Brost <matthew.brost@intel.com> Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com> Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm/xe/xe_device.h')
-rw-r--r-- drivers/gpu/drm/xe/xe_device.h | 14
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 25c5087f5aad..d277f8985f7b 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -90,20 +90,14 @@ static inline struct xe_force_wake * gt_to_fw(struct xe_gt *gt)
void xe_device_mem_access_get(struct xe_device *xe);
void xe_device_mem_access_put(struct xe_device *xe);
-static inline void xe_device_assert_mem_access(struct xe_device *xe)
+static inline bool xe_device_mem_access_ongoing(struct xe_device *xe)
{
- XE_WARN_ON(!xe->mem_access.ref);
+ return atomic_read(&xe->mem_access.ref);
}
-static inline bool xe_device_mem_access_ongoing(struct xe_device *xe)
+static inline void xe_device_assert_mem_access(struct xe_device *xe)
{
- bool ret;
-
- mutex_lock(&xe->mem_access.lock);
- ret = xe->mem_access.ref;
- mutex_unlock(&xe->mem_access.lock);
-
- return ret;
+ XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
}
static inline bool xe_device_in_fault_mode(struct xe_device *xe)