author	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2022-07-27 13:49:02 +0200
committer	Al Viro <viro@zeniv.linux.org.uk>	2022-07-30 00:35:51 -0400
commit	cf634d540a29018e8d69ab1befb7e08182bc6594 (patch)
tree	0639c640ff8089a405323e96f33ae9cc7d21e4c3 /fs/dcache.c
parent	40a3cb0d2314a41975aa385a74643878454f6eac (diff)
fs/dcache: Disable preemption on i_dir_seq write side on PREEMPT_RT
i_dir_seq is a sequence counter with a lock which is represented by the
lowest bit. The writer atomically updates the counter which ensures that it
can be modified by only one writer at a time. This requires preemption to
be disabled across the write side critical section.

On !PREEMPT_RT kernels this is implicit by the caller acquiring
dentry::lock. On PREEMPT_RT kernels spin_lock() does not disable
preemption, which means that a preempting writer or reader would live
lock. It's therefore required to disable preemption explicitly.

An alternative solution would be to replace i_dir_seq with a seqlock_t for
PREEMPT_RT, but that comes with its own set of problems due to arbitrary
lock nesting. A pure sequence count with an associated spinlock is not
possible because the locks held by the caller are not necessarily related.

As the critical section is small, disabling preemption is a sensible
solution.

Reported-by: Oleg.Karfich@wago.com
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lkml.kernel.org/r/20220613140712.77932-2-bigeasy@linutronix.de
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
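For illustration only, here is a minimal user-space sketch of the pattern the
changelog describes: a sequence counter whose lowest bit acts as the write
lock, claimed with a compare-and-swap and released by advancing to the next
even value. The names (dir_seq, seq_write_begin, seq_write_end) are invented
for this sketch; the kernel operates on inode::i_dir_seq with cmpxchg() and
smp_store_release(), as the diff below shows.

	#include <stdatomic.h>
	#include <stdio.h>

	/* User-space stand-in for inode::i_dir_seq. */
	static _Atomic unsigned int dir_seq;

	static unsigned int seq_write_begin(void)
	{
		for (;;) {
			unsigned int n = atomic_load_explicit(&dir_seq, memory_order_relaxed);

			/* Even means unlocked; claim the write side by setting bit 0. */
			if (!(n & 1) &&
			    atomic_compare_exchange_weak_explicit(&dir_seq, &n, n + 1,
								  memory_order_acquire,
								  memory_order_relaxed))
				return n;
		}
	}

	static void seq_write_end(unsigned int n)
	{
		/* Release: advance to the next even value so readers see the update. */
		atomic_store_explicit(&dir_seq, n + 2, memory_order_release);
	}

	int main(void)
	{
		unsigned int n = seq_write_begin();
		/* ... directory modification would go here ... */
		seq_write_end(n);
		printf("sequence advanced from %u to %u\n", n, n + 2);
		return 0;
	}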
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--	fs/dcache.c	12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 94b737311b61..71969505e82e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2564,7 +2564,15 @@ EXPORT_SYMBOL(d_rehash);
 
 static inline unsigned start_dir_add(struct inode *dir)
 {
-
+	/*
+	 * The caller holds a spinlock (dentry::d_lock). On !PREEMPT_RT
+	 * kernels spin_lock() implicitly disables preemption, but not on
+	 * PREEMPT_RT. So for RT it has to be done explicitly to protect
+	 * the sequence count write side critical section against a reader
+	 * or another writer preempting, which would result in a live lock.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_disable();
 	for (;;) {
 		unsigned n = dir->i_dir_seq;
 		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
@@ -2576,6 +2584,8 @@ static inline unsigned start_dir_add(struct inode *dir)
 static inline void end_dir_add(struct inode *dir, unsigned n)
 {
 	smp_store_release(&dir->i_dir_seq, n + 2);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_enable();
 }
 
 static void d_wait_lookup(struct dentry *dentry)
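The comment added in the hunk above refers to readers and other writers live
locking. Continuing the user-space sketch from the changelog section (same
hypothetical dir_seq counter, same translation unit), the reader side spins
while the low bit is set and retries if the counter moved. A writer preempted
between seq_write_begin() and seq_write_end() therefore leaves readers
spinning forever, which is the live lock the kernel side avoids on
PREEMPT_RT by disabling preemption across the write side.

	static unsigned int seq_read_begin(void)
	{
		unsigned int n;

		/* Wait for an even value: odd means a write is in progress. */
		do {
			n = atomic_load_explicit(&dir_seq, memory_order_acquire);
		} while (n & 1);
		return n;
	}

	static int seq_read_retry(unsigned int n)
	{
		/* Retry if a writer advanced the counter during the lookup. */
		return atomic_load_explicit(&dir_seq, memory_order_acquire) != n;
	}

	/* Typical lockless reader loop built on the two helpers above. */
	static void reader(void)
	{
		unsigned int seq;

		do {
			seq = seq_read_begin();
			/* ... lockless directory lookup would go here ... */
		} while (seq_read_retry(seq));
	}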