diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-01-20 09:40:49 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-01-20 09:40:49 -0800 |
commit | 4b84a4c8d40dfbfe1becec13a6e373e871e103e9 (patch) | |
tree | fb6ca5bbe6c42205de69498d9be4b95832905a80 /lib | |
parent | d5829524243652409e3fa2853736649674c294f0 (diff) | |
parent | c859df526b203497227b2b16c9bebcede67221e4 (diff) |
Merge tag 'vfs-6.14-rc1.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
Pull misc vfs updates from Christian Brauner:
"Features:
- Support caching symlink lengths in inodes
The size is stored in a new union utilizing the same space as
i_devices, thus avoiding growing the struct or taking up any more
space
When utilized it dodges strlen() in vfs_readlink(), giving about
1.5% speed up when issuing readlink on /initrd.img on ext4
- Add RWF_DONTCACHE iocb and FOP_DONTCACHE file_operations flag
If a file system supports uncached buffered IO, it may set
FOP_DONTCACHE and enable support for RWF_DONTCACHE.
If RWF_DONTCACHE is attempted without the file system supporting
it, it'll get errored with -EOPNOTSUPP
- Enable VBOXGUEST and VBOXSF_FS on ARM64
Now that VirtualBox is able to run as a host on arm64 (e.g. the
Apple M3 processors) we can enable VBOXSF_FS (and in turn
VBOXGUEST) for this architecture.
Tested with various runs of bonnie++ and dbench on an Apple MacBook
Pro with the latest VirtualBox 7.1.4 r165100 installed
Cleanups:
- Delay sysctl_nr_open check in expand_files()
- Use kernel-doc includes in fiemap docbook
- Use page->private instead of page->index in watch_queue
- Use a consume fence in mnt_idmap() as it's heavily used in
link_path_walk()
- Replace magic number 7 with ARRAY_SIZE() in fc_log
- Sort out a stale comment about races between fd alloc and dup2()
- Fix return type of do_mount() from long to int
- Various cosmetic cleanups for the lockref code
Fixes:
- Annotate spinning as unlikely() in __read_seqcount_begin
The annotation already used to be there, but got lost in commit
52ac39e5db51 ("seqlock: seqcount_t: Implement all read APIs as
statement expressions")
- Fix proc_handler for sysctl_nr_open
- Flush delayed work in delayed fput()
- Fix grammar and spelling in propagate_umount()
- Fix ESP not readable during coredump
In /proc/PID/stat, there is the kstkesp field which is the stack
pointer of a thread. While the thread is active, this field reads
zero. But during a coredump, it should have a valid value.
However, at the moment, kstkesp is zero even during a coredump
- Don't wake up the writer if the pipe is still full
- Fix unbalanced user_access_end() in select code"
* tag 'vfs-6.14-rc1.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (28 commits)
gfs2: use lockref_init for qd_lockref
erofs: use lockref_init for pcl->lockref
dcache: use lockref_init for d_lockref
lockref: add a lockref_init helper
lockref: drop superfluous externs
lockref: use bool for false/true returns
lockref: improve the lockref_get_not_zero description
lockref: remove lockref_put_not_zero
fs: Fix return type of do_mount() from long to int
select: Fix unbalanced user_access_end()
vbox: Enable VBOXGUEST and VBOXSF_FS on ARM64
pipe_read: don't wake up the writer if the pipe is still full
selftests: coredump: Add stackdump test
fs/proc: do_task_stat: Fix ESP not readable during coredump
fs: add RWF_DONTCACHE iocb and FOP_DONTCACHE file_operations flag
fs: sort out a stale comment about races between fd alloc and dup2
fs: Fix grammar and spelling in propagate_umount()
fs: fc_log replace magic number 7 with ARRAY_SIZE()
fs: use a consume fence in mnt_idmap()
file: flush delayed work in delayed fput()
...
Diffstat (limited to 'lib')
-rw-r--r-- | lib/lockref.c | 60 |
1 files changed, 15 insertions, 45 deletions
diff --git a/lib/lockref.c b/lib/lockref.c index 2afe4c5d8919..5d8e3ef3860e 100644 --- a/lib/lockref.c +++ b/lib/lockref.c @@ -58,23 +58,22 @@ EXPORT_SYMBOL(lockref_get); * @lockref: pointer to lockref structure * Return: 1 if count updated successfully or 0 if count was zero */ -int lockref_get_not_zero(struct lockref *lockref) +bool lockref_get_not_zero(struct lockref *lockref) { - int retval; + bool retval = false; CMPXCHG_LOOP( new.count++; if (old.count <= 0) - return 0; + return false; , - return 1; + return true; ); spin_lock(&lockref->lock); - retval = 0; if (lockref->count > 0) { lockref->count++; - retval = 1; + retval = true; } spin_unlock(&lockref->lock); return retval; @@ -82,39 +81,11 @@ int lockref_get_not_zero(struct lockref *lockref) EXPORT_SYMBOL(lockref_get_not_zero); /** - * lockref_put_not_zero - Decrements count unless count <= 1 before decrement - * @lockref: pointer to lockref structure - * Return: 1 if count updated successfully or 0 if count would become zero - */ -int lockref_put_not_zero(struct lockref *lockref) -{ - int retval; - - CMPXCHG_LOOP( - new.count--; - if (old.count <= 1) - return 0; - , - return 1; - ); - - spin_lock(&lockref->lock); - retval = 0; - if (lockref->count > 1) { - lockref->count--; - retval = 1; - } - spin_unlock(&lockref->lock); - return retval; -} -EXPORT_SYMBOL(lockref_put_not_zero); - -/** * lockref_put_return - Decrement reference count if possible * @lockref: pointer to lockref structure * * Decrement the reference count and return the new value. - * If the lockref was dead or locked, return an error. + * If the lockref was dead or locked, return -1. 
*/ int lockref_put_return(struct lockref *lockref) { @@ -134,22 +105,22 @@ EXPORT_SYMBOL(lockref_put_return); * @lockref: pointer to lockref structure * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken */ -int lockref_put_or_lock(struct lockref *lockref) +bool lockref_put_or_lock(struct lockref *lockref) { CMPXCHG_LOOP( new.count--; if (old.count <= 1) break; , - return 1; + return true; ); spin_lock(&lockref->lock); if (lockref->count <= 1) - return 0; + return false; lockref->count--; spin_unlock(&lockref->lock); - return 1; + return true; } EXPORT_SYMBOL(lockref_put_or_lock); @@ -169,23 +140,22 @@ EXPORT_SYMBOL(lockref_mark_dead); * @lockref: pointer to lockref structure * Return: 1 if count updated successfully or 0 if lockref was dead */ -int lockref_get_not_dead(struct lockref *lockref) +bool lockref_get_not_dead(struct lockref *lockref) { - int retval; + bool retval = false; CMPXCHG_LOOP( new.count++; if (old.count < 0) - return 0; + return false; , - return 1; + return true; ); spin_lock(&lockref->lock); - retval = 0; if (lockref->count >= 0) { lockref->count++; - retval = 1; + retval = true; } spin_unlock(&lockref->lock); return retval; |