author    Prashanth Prakash <pprakash@codeaurora.org>  2017-11-15 10:11:49 -0700
committer Catalin Marinas <catalin.marinas@arm.com>    2018-01-02 13:48:55 +0000
commit    db50a74d8193944dd1ee488fd2a813a364fbbaa7 (patch)
tree      d67d11acbee21cbc202701cba4150ccb2b659a20 /include/linux/cpuidle.h
parent    1f911c3a1140e1668e68791fb6dd07757e2f3956 (diff)
cpuidle: Add new macro to enter a retention idle state
If a CPU is entering a low power idle state where it doesn't lose any
context, then there is no need to call cpu_pm_enter()/cpu_pm_exit().
Add a new macro (CPU_PM_CPU_IDLE_ENTER_RETENTION) to be used by cpuidle
drivers when they are entering a retention state. By not calling
cpu_pm_enter() and cpu_pm_exit() we reduce the latency involved in
entering and exiting retention idle states.

CPU_PM_CPU_IDLE_ENTER_RETENTION assumes that no state is lost and hence
CPU PM notifiers will not be called. We may need a broader change if we
need to support partial retention states efficiently.

On an ARM64 based Qualcomm server platform we measured the following
overhead for calling cpu_pm_enter() and cpu_pm_exit() for retention
states.

workload: stress --hdd #CPUs --hdd-bytes 32M -t 30

	Average overhead of cpu_pm_enter - 1.2us
	Average overhead of cpu_pm_exit  - 3.1us

Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Prashanth Prakash <pprakash@codeaurora.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
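For illustration only (not part of this patch), a minimal sketch of how a
cpuidle driver's ->enter() callbacks might use the two macros;
my_enter_powerdown(), my_enter_retention() and arm64_enter_idle() are
hypothetical names:

#include <linux/cpuidle.h>

/* Hypothetical low-level entry function (e.g. a firmware/PSCI call). */
static int arm64_enter_idle(int idx);

/*
 * Power-collapse state: CPU context is lost, so the CPU PM notifiers
 * must run around the low-level entry.
 */
static int my_enter_powerdown(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int idx)
{
	return CPU_PM_CPU_IDLE_ENTER(arm64_enter_idle, idx);
}

/*
 * Retention state: no context is lost, so the new macro skips
 * cpu_pm_enter()/cpu_pm_exit() entirely.
 */
static int my_enter_retention(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int idx)
{
	return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm64_enter_idle, idx);
}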
Diffstat (limited to 'include/linux/cpuidle.h')
-rw-r--r--  include/linux/cpuidle.h  40
 1 file changed, 24 insertions, 16 deletions
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 8f7788d23b57..871f9e21810c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -257,22 +257,30 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif
-#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
-({ \
- int __ret; \
- \
- if (!idx) { \
- cpu_do_idle(); \
- return idx; \
- } \
- \
- __ret = cpu_pm_enter(); \
- if (!__ret) { \
- __ret = low_level_idle_enter(idx); \
- cpu_pm_exit(); \
- } \
- \
- __ret ? -1 : idx; \
+#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
+({ \
+ int __ret = 0; \
+ \
+ if (!idx) { \
+ cpu_do_idle(); \
+ return idx; \
+ } \
+ \
+ if (!is_retention) \
+ __ret = cpu_pm_enter(); \
+ if (!__ret) { \
+ __ret = low_level_idle_enter(idx); \
+ if (!is_retention) \
+ cpu_pm_exit(); \
+ } \
+ \
+ __ret ? -1 : idx; \
})
+#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
+
#endif /* _LINUX_CPUIDLE_H */