author    Fabien Dessenne <fabien.dessenne@st.com>    2019-03-07 16:58:23 +0100
committer Bjorn Andersson <bjorn.andersson@linaro.org>    2019-06-29 21:08:14 -0700
commit    360aa640a59f269b784848c0b2d6d462952750d9 (patch)
tree      cf5d41daeadb6f14a8c41637b7ae29ee05e6747a /drivers/hwspinlock/hwspinlock_core.c
parent    bce6f5221374ba451a337d0a3773e6eb99dad3e8 (diff)
hwspinlock: add the 'in_atomic' API
Add the 'in_atomic' mode, which can be called from an atomic context. This mode relies on the existing 'raw' mode (no lock, no preemption/irq disabling), with the difference that the timeout is not based on jiffies (jiffies do not increase while irqs are disabled) but is instead handled with busy-waiting udelay() calls.

Signed-off-by: Fabien Dessenne <fabien.dessenne@st.com>
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
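For illustration, the full patch also adds caller-facing wrappers to include/linux/hwspinlock.h (outside the single file shown in this view). A minimal usage sketch, assuming those wrappers, hwspin_lock_timeout_in_atomic() and hwspin_unlock_in_atomic(), and a hypothetical driver that has already requested a lock:

#include <linux/hwspinlock.h>

/* Hypothetical helper: 'hwlock' was obtained earlier, e.g. with
 * hwspin_lock_request_specific(). Safe to call with irqs disabled,
 * since the timeout below busy-waits with udelay() instead of
 * watching jiffies; keep it to a few milliseconds at most. */
static int my_drv_poke_shared_reg(struct hwspinlock *hwlock)
{
        int ret;

        ret = hwspin_lock_timeout_in_atomic(hwlock, 2); /* timeout in ms */
        if (ret)
                return ret; /* -ETIMEDOUT if the remote side kept the lock */

        /* ... short access to the resource shared with the remote core ... */

        hwspin_unlock_in_atomic(hwlock);
        return 0;
}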
Diffstat (limited to 'drivers/hwspinlock/hwspinlock_core.c')
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c | 43
1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index d806307f19c2..8862445aa858 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
@@ -23,6 +24,9 @@
#include "hwspinlock_internal.h"
+/* retry delay used in atomic context */
+#define HWSPINLOCK_RETRY_DELAY_US 100
+
/* radix tree tags */
#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */
@@ -68,11 +72,11 @@ static DEFINE_MUTEX(hwspinlock_tree_lock);
* users need some time-consuming or sleepable operations under the hardware
* lock, so they need a sleepable lock (like a mutex) to protect the operations.
*
- * If the mode is not HWLOCK_RAW, upon a successful return from this function,
- * preemption (and possibly interrupts) is disabled, so the caller must not
- * sleep, and is advised to release the hwspinlock as soon as possible. This is
- * required in order to minimize remote cores polling on the hardware
- * interconnect.
+ * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
+ * return from this function, preemption (and possibly interrupts) is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock as
+ * soon as possible. This is required in order to minimize remote cores polling
+ * on the hardware interconnect.
*
* The user decides whether local interrupts are disabled or not, and if yes,
* whether he wants their previous state to be saved. It is up to the user
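(As a sketch of how the new mode surfaces to callers, assuming the hwspin_trylock_in_atomic() wrapper added by the full patch, a one-shot attempt from interrupt context might look like the following; the handler name and wiring are hypothetical.)

#include <linux/hwspinlock.h>
#include <linux/interrupt.h>

/* Hypothetical irq handler; 'data' carries a previously requested lock. */
static irqreturn_t my_drv_irq(int irq, void *data)
{
        struct hwspinlock *hwlock = data;

        /* Single attempt, no sleeping and no preemption/irq fiddling. */
        if (hwspin_trylock_in_atomic(hwlock))
                return IRQ_NONE; /* lock busy (-EBUSY), retry later */

        /* ... brief access to the shared resource ... */

        hwspin_unlock_in_atomic(hwlock);
        return IRQ_HANDLED;
}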
@@ -112,6 +116,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
ret = spin_trylock_irq(&hwlock->lock);
break;
case HWLOCK_RAW:
+ case HWLOCK_IN_ATOMIC:
ret = 1;
break;
default:
@@ -136,6 +141,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
spin_unlock_irq(&hwlock->lock);
break;
case HWLOCK_RAW:
+ case HWLOCK_IN_ATOMIC:
/* Nothing to do */
break;
default:
@@ -179,11 +185,14 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
* users need some time-consuming or sleepable operations under the hardware
* lock, so they need a sleepable lock (like a mutex) to protect the operations.
*
- * If the mode is not HWLOCK_RAW, upon a successful return from this function,
- * preemption is disabled (and possibly local interrupts, too), so the caller
- * must not sleep, and is advised to release the hwspinlock as soon as possible.
- * This is required in order to minimize remote cores polling on the
- * hardware interconnect.
+ * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context), the timeout
+ * is handled with busy-waiting delays and hence should not exceed a few msecs.
+ *
+ * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
+ * return from this function, preemption (and possibly interrupts) is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock as
+ * soon as possible. This is required in order to minimize remote cores polling
+ * on the hardware interconnect.
*
* The user decides whether local interrupts are disabled or not, and if yes,
* whether he wants their previous state to be saved. It is up to the user
@@ -198,7 +207,7 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
int mode, unsigned long *flags)
{
int ret;
- unsigned long expire;
+ unsigned long expire, atomic_delay = 0;
expire = msecs_to_jiffies(to) + jiffies;
@@ -212,8 +221,15 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
* The lock is already taken, let's check if the user wants
* us to try again
*/
- if (time_is_before_eq_jiffies(expire))
- return -ETIMEDOUT;
+ if (mode == HWLOCK_IN_ATOMIC) {
+ udelay(HWSPINLOCK_RETRY_DELAY_US);
+ atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
+ if (atomic_delay > to * 1000)
+ return -ETIMEDOUT;
+ } else {
+ if (time_is_before_eq_jiffies(expire))
+ return -ETIMEDOUT;
+ }
/*
* Allow platform-specific relax handlers to prevent
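(The hunk above is the heart of the change: in HWLOCK_IN_ATOMIC mode, each failed attempt busy-waits for HWSPINLOCK_RETRY_DELAY_US, the waited time accumulates in atomic_delay, and the loop bails out once it exceeds the caller's timeout converted from ms to us; no jiffies-based deadline is consulted. A standalone model of that accounting, with hypothetical names and no kernel dependencies:)

/* Minimal model of the busy-wait timeout accounting used above. 'to_ms'
 * mirrors the 'to' parameter; try_lock() and delay_us() stand in for
 * __hwspin_trylock() and udelay(). */
#define RETRY_DELAY_US 100

static int busywait_lock(int (*try_lock)(void),
                         void (*delay_us)(unsigned long),
                         unsigned int to_ms)
{
        unsigned long waited_us = 0;

        for (;;) {
                if (try_lock())
                        return 0;                /* lock acquired */
                delay_us(RETRY_DELAY_US);        /* burn 100 us */
                waited_us += RETRY_DELAY_US;
                if (waited_us > to_ms * 1000)    /* compare in microseconds */
                        return -1;               /* timed out */
        }
}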
@@ -276,6 +292,7 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
spin_unlock_irq(&hwlock->lock);
break;
case HWLOCK_RAW:
+ case HWLOCK_IN_ATOMIC:
/* Nothing to do */
break;
default: