Diffstat (limited to 'kernel/locking/percpu-rwsem.c')
-rw-r--r--  kernel/locking/percpu-rwsem.c | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index aa2b118d2f88..969389df6eee 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -45,7 +45,7 @@ void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
 }
 EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
-int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
+bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
 {
 	/*
 	 * Due to having preemption disabled the decrement happens on
@@ -69,7 +69,7 @@ int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 	 * release in percpu_up_write().
 	 */
 	if (likely(!smp_load_acquire(&sem->readers_block)))
-		return 1;
+		return true;
 
 	/*
 	 * Per the above comment; we still have preemption disabled and
@@ -78,7 +78,7 @@ int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 	__percpu_up_read(sem);
 
 	if (try)
-		return 0;
+		return false;
 
 	/*
 	 * We either call schedule() in the wait, or we'll fall through
@@ -94,7 +94,7 @@ int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 	__up_read(&sem->rw_sem);
 
 	preempt_disable();
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL_GPL(__percpu_down_read);
 
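
Note on usage (not part of the patch above): the value returned here is consumed by the fast-path wrappers in include/linux/percpu-rwsem.h. The sketch below shows how a trylock-style caller would use the new bool result; it is a simplified illustration only, and the read_count/rss members plus the rcu_sync_is_idle() fast-path check are assumptions about that header at the time, not taken from this diff.

/*
 * Illustration only: a trylock-style caller of __percpu_down_read(),
 * loosely modelled on percpu_down_read_trylock(). Member names and the
 * fast-path check are assumptions, not part of this patch.
 */
static inline bool percpu_down_read_trylock_sketch(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	__this_cpu_inc(*sem->read_count);		/* optimistic reader count */
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))	/* writer active or pending */
		ret = __percpu_down_read(sem, true);	/* try == true: may return false */
	preempt_enable();

	return ret;
}

With the bool signature the slow path reads as a plain success/failure predicate, which is what the try == true caller needs; the unconditional percpu_down_read() path can simply ignore the (always true) result.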